import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs from the Giphy search API for the given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
"""Project Euler 12: find the first triangle number with more than 500 divisors."""


def triangle_number_generator():
    """Generate triangle numbers n * (n + 1) / 2."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number having more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
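# Illustrative sanity check (added; not part of the original solution): 28 is
# the first triangle number with more than five divisors (1, 2, 4, 7, 14, 28),
# so count_divisors(28) should return 6.
assert count_divisors(28) == 6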
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
lowerCamelCase = True
from torch.cuda.amp import autocast
lowerCamelCase = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pre-train."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Data collator that dynamically pads the received audio inputs for pre-training."""

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2,
        )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    """Subclassed Trainer that decays the gumbel softmax temperature after every update step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
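# Illustrative note (added for this write-up): with the default ModelArguments
# values above (max 2.0, min 0.5, decay 0.999995), the gumbel temperature after
# k update steps is max(2.0 * 0.999995**k, 0.5), which reaches the 0.5 floor
# after roughly ln(0.25) / ln(0.999995) ≈ 277,000 steps.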
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )

    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
"""Deprecated feature extractor class for Donut."""
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
lowerCAmelCase__ = 0 # The first color of the flag.
lowerCAmelCase__ = 1 # The second color of the flag.
lowerCAmelCase__ = 2 # The third color of the flag.
lowerCAmelCase__ = (red, white, blue)
def snake_case_ ( A_ : list ):
'''simple docstring'''
if not sequence:
return []
if len(A_ ) == 1:
return list(A_ )
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Tuple = len(A_ ) - 1
_lowerCamelCase : List[str] = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCamelCase , _lowerCamelCase : int = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCamelCase , _lowerCamelCase : Optional[int] = sequence[high], sequence[mid]
high -= 1
else:
_lowerCamelCase : List[Any] = F'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(A_ )
return sequence
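# Illustrative example (added; not part of the original module): the sort
# rearranges the three values in place in a single pass over the sequence.
assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]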
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)
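    # Illustrative note (added): the first loop above builds the classic
    # histogram equalization lookup table s_k = (L - 1) * sum_{j <= k} p_j,
    # where p_j is the empirical probability of gray level j; self.last_list
    # is that table, and the second loop applies it pixel by pixel.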
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
"""Open the first few Google search results for the given query in a browser."""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
"""Stalebot for the huggingface/diffusers issue tracker."""
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Paths and flip direction to set before running (left blank in the original).
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Flip every image in the dataset and write the new images and annotations."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    """Collect image paths and YOLO-format bounding boxes from the label files."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip each image and mirror its annotation centers along the flip axis."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """Generate a random string made of lowercase letters and digits."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import Counter
from timeit import timeit
def _snake_case ( __snake_case : str = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _snake_case ( __snake_case : str = "" ):
"""simple docstring"""
if len(__snake_case ) == 0:
return True
_lowerCamelCase : Union[str, Any] = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_lowerCamelCase : dict[str, int] = {}
for character in lower_case_input_str:
_lowerCamelCase : str = character_freq_dict.get(__snake_case , 0 ) + 1
_lowerCamelCase : Optional[int] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
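# Illustrative check (added; not part of the original module): "momo" can be
# rearranged into the palindrome "moom", while "abc" cannot be rearranged into
# any palindrome because all three of its characters have odd counts.
assert can_string_be_rearranged_as_palindrome_counter("momo") is True
assert can_string_be_rearranged_as_palindrome_counter("abc") is False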
def _snake_case ( __snake_case : str = "" ):
"""simple docstring"""
print("""\nFor string = """ , __snake_case , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(__snake_case ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(__snake_case ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
UpperCAmelCase = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
UpperCAmelCase = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
"""Zero-shot audio classification pipeline."""
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
import argparse

import fairseq
import torch
from torch import nn

from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim,
):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    parser.add_argument("--add_adapter", default=True, type=bool, help="whether to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
"""Feature extractor class for EnCodec."""
from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
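# Hedged worked example (added; the parameter values are illustrative): with
# chunk_length_s=1.0 and overlap=0.01 at the default 24 kHz sampling rate,
# chunk_length is 24_000 samples and chunk_stride is
# max(1, int((1.0 - 0.01) * 24_000)) = 23_760 samples, so consecutive chunks
# overlap by 240 samples.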
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_textdatasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_textdatasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_textdatasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
INSTALL_CONTENT = """
# Installing Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
"""simple docstring"""
import os
def _snake_case ( ):
A = os.path.dirname(os.path.realpath(snake_case__ ) )
A = os.path.join(snake_case__ , 'triangle.txt' )
with open(snake_case__ ) as f:
A = f.readlines()
A = []
for line in triangle:
A = []
for number in line.strip().split(' ' ):
numbers_from_line.append(int(snake_case__ ) )
a.append(snake_case__ )
for i in range(1 , len(snake_case__ ) ):
for j in range(len(a[i] ) ):
A = a[i - 1][j] if j != len(a[i - 1] ) else 0
A = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(snake_case__ , snake_case__ )
return max(a[-1] )
if __name__ == "__main__":
print(solution()) | 91 |
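# Illustrative sketch (added; bypasses the triangle.txt file, which is not part
# of this dump): the same row-by-row update applied to the small triangle from
# the Project Euler 18 statement gives the documented answer 23 (3 + 7 + 4 + 9).
def _demo_max_path_sum() -> int:
    a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            left = a[i - 1][j] if j != len(a[i - 1]) else 0
            right = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(left, right)
    return max(a[-1])


assert _demo_max_path_sum() == 23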
"""Minimum cost for travel-day tickets (dynamic programming with memoization)."""
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
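# Illustrative check (added; not part of the original module): for travel days
# [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15] for 1-, 7- and 30-day passes,
# the cheapest combination is a 1-day pass, a 7-day pass covering days 4-8, and
# another 1-day pass, for a total of 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11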
"""Fine-tuning multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM)."""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Train language if it is different from the evaluation language.'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCamelCase_ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def _lowerCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , __magic_name__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase : Any =training_args.get_process_log_level()
logger.setLevel(__magic_name__ )
datasets.utils.logging.set_verbosity(__magic_name__ )
transformers.utils.logging.set_verbosity(__magic_name__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase : int =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase : Union[str, Any] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowercase : int =load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase : Optional[Any] =load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowercase : int =train_dataset.features['''label'''].names
if training_args.do_eval:
lowercase : Any =load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowercase : Optional[int] =eval_dataset.features['''label'''].names
if training_args.do_predict:
lowercase : Tuple =load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowercase : Tuple =predict_dataset.features['''label'''].names
# Labels
lowercase : Tuple =len(__magic_name__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase : Any =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , idalabel={str(__magic_name__ ): label for i, label in enumerate(__magic_name__ )} , labelaid={label: i for i, label in enumerate(__magic_name__ )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase : List[Any] =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase : List[str] =AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowercase : Tuple ='''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowercase : List[str] =False
def preprocess_function(__magic_name__ : List[str] ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=__magic_name__ , max_length=data_args.max_seq_length , truncation=__magic_name__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase : Optional[int] =min(len(__magic_name__ ) , data_args.max_train_samples )
lowercase : Optional[int] =train_dataset.select(range(__magic_name__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowercase : int =train_dataset.map(
__magic_name__ , batched=__magic_name__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(__magic_name__ ) ) , 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase : Any =min(len(__magic_name__ ) , data_args.max_eval_samples )
lowercase : Optional[Any] =eval_dataset.select(range(__magic_name__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowercase : Any =eval_dataset.map(
__magic_name__ , batched=__magic_name__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowercase : int =min(len(__magic_name__ ) , data_args.max_predict_samples )
lowercase : List[Any] =predict_dataset.select(range(__magic_name__ ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
lowercase : Optional[Any] =predict_dataset.map(
__magic_name__ , batched=__magic_name__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
    metric = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p : EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowercase : str =default_data_collator
elif training_args.fpaa:
lowercase : str =DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 )
else:
lowercase : Dict =None
# Initialize our Trainer
lowercase : Any =Trainer(
model=__magic_name__ , args=__magic_name__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__magic_name__ , tokenizer=__magic_name__ , data_collator=__magic_name__ , )
# Training
if training_args.do_train:
lowercase : Union[str, Any] =None
if training_args.resume_from_checkpoint is not None:
lowercase : Optional[Any] =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase : Optional[int] =last_checkpoint
lowercase : Any =trainer.train(resume_from_checkpoint=__magic_name__ )
lowercase : Any =train_result.metrics
lowercase : List[str] =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(__magic_name__ )
)
lowercase : Tuple =min(__magic_name__ , len(__magic_name__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , __magic_name__ )
trainer.save_metrics('''train''' , __magic_name__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase : List[Any] =trainer.evaluate(eval_dataset=__magic_name__ )
lowercase : Optional[int] =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__magic_name__ )
lowercase : List[str] =min(__magic_name__ , len(__magic_name__ ) )
trainer.log_metrics('''eval''' , __magic_name__ )
trainer.save_metrics('''eval''' , __magic_name__ )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
lowercase , lowercase , lowercase : List[Any] =trainer.predict(__magic_name__ , metric_key_prefix='''predict''' )
lowercase : List[Any] =(
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__magic_name__ )
)
lowercase : Tuple =min(__magic_name__ , len(__magic_name__ ) )
trainer.log_metrics('''predict''' , __magic_name__ )
trainer.save_metrics('''predict''' , __magic_name__ )
lowercase : Union[str, Any] =np.argmax(__magic_name__ , axis=1 )
lowercase : int =os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(__magic_name__ , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(__magic_name__ ):
lowercase : Optional[Any] =label_list[item]
writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
| 92 |
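For reference, the script above is meant to be launched from the command line; a hypothetical invocation (model, languages and output path are illustrative, and the flags correspond to the dataclass fields defined above):
python run_xnli.py \
    --model_name_or_path bert-base-multilingual-cased \
    --language de \
    --train_language en \
    --do_train \
    --do_eval \
    --per_device_train_batch_size 32 \
    --output_dir /tmp/debug_xnli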
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
_lowerCAmelCase = 384
if "tiny" in model_name:
_lowerCAmelCase = [3, 3, 9, 3]
_lowerCAmelCase = [96, 192, 384, 768]
if "small" in model_name:
_lowerCAmelCase = [3, 3, 27, 3]
_lowerCAmelCase = [96, 192, 384, 768]
if "base" in model_name:
_lowerCAmelCase = [3, 3, 27, 3]
_lowerCAmelCase = [128, 256, 512, 1024]
_lowerCAmelCase = 512
if "large" in model_name:
_lowerCAmelCase = [3, 3, 27, 3]
_lowerCAmelCase = [192, 384, 768, 1536]
_lowerCAmelCase = 768
if "xlarge" in model_name:
_lowerCAmelCase = [3, 3, 27, 3]
_lowerCAmelCase = [256, 512, 1024, 2048]
_lowerCAmelCase = 1024
# set label information
_lowerCAmelCase = 150
_lowerCAmelCase = """huggingface/label-files"""
_lowerCAmelCase = """ade20k-id2label.json"""
_lowerCAmelCase = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
_lowerCAmelCase = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
_lowerCAmelCase = ConvNextConfig(
depths=__lowerCamelCase , hidden_sizes=__lowerCamelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
_lowerCAmelCase = UperNetConfig(
backbone_config=__lowerCamelCase , auxiliary_in_channels=__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , )
return config
def create_rename_keys(config ):
_lowerCAmelCase = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    # Pop the tensor stored under the old key and reinsert it under the new one.
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
_lowerCAmelCase = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
_lowerCAmelCase = model_name_to_url[model_name]
_lowerCAmelCase = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )["""state_dict"""]
_lowerCAmelCase = get_upernet_config(__lowerCamelCase )
_lowerCAmelCase = UperNetForSemanticSegmentation(__lowerCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCAmelCase = state_dict.pop(__lowerCamelCase )
if "bn" in key:
_lowerCAmelCase = key.replace("""bn""" , """batch_norm""" )
_lowerCAmelCase = val
# rename keys
_lowerCAmelCase = create_rename_keys(__lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
# verify on image
_lowerCAmelCase = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
_lowerCAmelCase = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("""RGB""" )
_lowerCAmelCase = SegformerImageProcessor()
_lowerCAmelCase = processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
_lowerCAmelCase = model(__lowerCamelCase )
if model_name == "upernet-convnext-tiny":
_lowerCAmelCase = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
_lowerCAmelCase = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
_lowerCAmelCase = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
_lowerCAmelCase = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
_lowerCAmelCase = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 5 | 0 |
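The conversion row above renames checkpoint entries with a simple pop-and-reinsert pattern; a minimal self-contained sketch of that pattern (the sample key names are illustrative, not from the source):
def rename_state_dict_key(dct, old, new):
    # Move the value stored under the old key to the new key.
    dct[new] = dct.pop(old)
state_dict = {"backbone.norm0.weight": 0.5}
rename_state_dict_key(state_dict, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
assert "backbone.hidden_states_norms.stage1.weight" in state_dict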
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = (EulerDiscreteScheduler,)
__magic_name__ :Any = 10
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
        config = {
'num_train_timesteps': 1_1_0_0,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**__UpperCAmelCase )
return config
def snake_case ( self ):
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ :int = self.get_scheduler_config()
lowerCAmelCase__ :Optional[Any] = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ :str = torch.manual_seed(0 )
lowerCAmelCase__ :Tuple = self.dummy_model()
lowerCAmelCase__ :Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ :List[Any] = sample.to(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ :Tuple = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Dict = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Any = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
lowerCAmelCase__ :Any = output.prev_sample
lowerCAmelCase__ :int = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ :Union[str, Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = self.scheduler_classes[0]
lowerCAmelCase__ :Any = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ :int = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ :int = torch.manual_seed(0 )
lowerCAmelCase__ :int = self.dummy_model()
lowerCAmelCase__ :Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ :Any = sample.to(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ :List[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
lowerCAmelCase__ :Any = output.prev_sample
lowerCAmelCase__ :Tuple = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ :Tuple = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 0.00_02 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.scheduler_classes[0]
lowerCAmelCase__ :Tuple = self.get_scheduler_config()
lowerCAmelCase__ :Dict = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
lowerCAmelCase__ :int = torch.manual_seed(0 )
lowerCAmelCase__ :List[Any] = self.dummy_model()
lowerCAmelCase__ :int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCAmelCase__ :Dict = sample.to(__UpperCAmelCase )
for t in scheduler.timesteps:
lowerCAmelCase__ :Optional[int] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
lowerCAmelCase__ :List[str] = output.prev_sample
lowerCAmelCase__ :Optional[int] = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ :List[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase__ :str = self.get_scheduler_config()
lowerCAmelCase__ :Optional[Any] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = torch.manual_seed(0 )
lowerCAmelCase__ :str = self.dummy_model()
lowerCAmelCase__ :List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCAmelCase__ :Tuple = sample.to(__UpperCAmelCase )
for t in scheduler.timesteps:
lowerCAmelCase__ :Any = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :int = model(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
lowerCAmelCase__ :Dict = output.prev_sample
lowerCAmelCase__ :str = torch.sum(torch.abs(__UpperCAmelCase ) )
lowerCAmelCase__ :str = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1E-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1E-3
| 93 |
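The four tests above all exercise the same denoising loop. A minimal sketch of that loop outside the test harness, assuming only the public diffusers API shown in the tests (the zero tensor stands in for a real UNet prediction, and the shapes are illustrative):
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet forward pass
    sample = scheduler.step(noise_pred, t, sample).prev_sample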
'''Project Euler Problem 205: probability that Peter's nine four-sided dice total more than Colin's six six-sided dice.'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    # Enumerate every possible roll and count how often each total occurs.
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins whenever Colin's total is strictly smaller than his.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    return round(peter_win_probability, ndigits=7)
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 5 | 0 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
SCREAMING_SNAKE_CASE = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
SCREAMING_SNAKE_CASE = {
'jukebox': 512,
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_LYRIC_TOKENS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
    def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=512 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs ) -> None:
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file , encoding='utf-8' ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file , encoding='utf-8' ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file , encoding='utf-8' ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = R"[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 79:
            oov = oov.replace(R"\-'" , R"\-+'" )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
def A__ ( self : List[Any] ) -> str:
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def A__ ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : Union[str, Any] =[self.artists_encoder.get(UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(UpperCAmelCase ) ):
lowercase : List[str] =[self.genres_encoder.get(UpperCAmelCase , 0 ) for genre in list_genres[genres]]
lowercase : List[Any] =list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
lowercase : Any =[[self.lyrics_encoder.get(UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def A__ ( self : Dict , UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
return list(UpperCAmelCase )
def A__ ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , **UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
lowercase , lowercase , lowercase : List[Any] =self.prepare_for_tokenization(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Optional[int] =self._tokenize(UpperCAmelCase )
return artist, genre, lyrics
def A__ ( self : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
lowercase : Optional[int] =artists[idx].lower()
lowercase : List[Any] =[genres[idx].lower()]
else:
lowercase : Dict =self._normalize(artists[idx] ) + '''.v2'''
lowercase : List[str] =[
self._normalize(UpperCAmelCase ) + '''.v2''' for genre in genres[idx].split('''_''' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
lowercase : Optional[int] =regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
lowercase : List[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
lowercase : Any ={vocab[index]: index + 1 for index in range(len(UpperCAmelCase ) )}
lowercase : Tuple =0
lowercase : Dict =len(UpperCAmelCase ) + 1
lowercase : Any =self.vocab
lowercase : List[str] ={v: k for k, v in self.vocab.items()}
lowercase : List[str] =''''''
else:
lowercase : Any =regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
lowercase : Union[str, Any] =self._run_strip_accents(UpperCAmelCase )
lowercase : List[str] =lyrics.replace('''\\''' , '''\n''' )
lowercase : Any =self.out_of_vocab.sub('''''' , UpperCAmelCase ), [], []
return artists, genres, lyrics
def A__ ( self : Optional[int] , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Optional[int] =unicodedata.normalize('''NFD''' , UpperCAmelCase )
lowercase : Tuple =[]
for char in text:
lowercase : List[str] =unicodedata.category(UpperCAmelCase )
if cat == "Mn":
continue
output.append(UpperCAmelCase )
return "".join(UpperCAmelCase )
def A__ ( self : Optional[int] , UpperCAmelCase : str ) -> str:
'''simple docstring'''
lowercase : str =(
[chr(UpperCAmelCase ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
+ [chr(UpperCAmelCase ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
+ [chr(UpperCAmelCase ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
+ ['''.''']
)
lowercase : Dict =frozenset(UpperCAmelCase )
lowercase : Any =re.compile(R'''_+''' )
lowercase : Optional[Any] =''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
lowercase : Any =pattern.sub('''_''' , UpperCAmelCase ).strip('''_''' )
return text
def A__ ( self : List[Any] , UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
return " ".join(UpperCAmelCase )
def A__ ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : bool = False ) -> Dict:
'''simple docstring'''
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Dict =TensorType(UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
import tensorflow as tf
lowercase : Union[str, Any] =tf.constant
lowercase : Tuple =tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
import torch
lowercase : str =torch.tensor
lowercase : Union[str, Any] =torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
import jax.numpy as jnp # noqa: F811
lowercase : List[Any] =jnp.array
lowercase : Optional[Any] =_is_jax
else:
lowercase : str =np.asarray
lowercase : Optional[Any] =_is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
lowercase : Tuple =[inputs]
if not is_tensor(UpperCAmelCase ):
lowercase : Optional[Any] =as_tensor(UpperCAmelCase )
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
return inputs
def __call__( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any]="" , UpperCAmelCase : Any="pt" ) -> BatchEncoding:
'''simple docstring'''
lowercase : Tuple =[0, 0, 0]
lowercase : Union[str, Any] =[artist] * len(self.version )
lowercase : List[str] =[genres] * len(self.version )
lowercase , lowercase , lowercase : Any =self.tokenize(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase , lowercase , lowercase : Union[str, Any] =self._convert_token_to_id(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Dict =[-INFINITY] * len(full_tokens[-1] )
lowercase : Optional[Any] =[
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def A__ ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Tuple =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCAmelCase ) )
lowercase : Optional[int] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCAmelCase ) )
lowercase : Any =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.artists_decoder.get(UpperCAmelCase )
lowercase : Union[str, Any] =[self.genres_decoder.get(UpperCAmelCase ) for genre in genres_index]
lowercase : List[Any] =[self.lyrics_decoder.get(UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 94 |
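For reference, a standalone re-implementation of the artist/genre normalization performed by _normalize above (hypothetical function name; assumed to mirror the method's behavior):
import re

def normalize(text: str) -> str:
    accepted = frozenset("abcdefghijklmnopqrstuvwxyz0123456789.")
    # Replace every non-accepted character with '_', collapse runs, strip ends.
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub(r"_+", "_", text).strip("_")

assert normalize("Alan Jackson") == "alan_jackson"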
'''simple docstring'''
from manim import *
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""CPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(1 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""GPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.align_to(_lowercase , _lowercase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""Model""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) , )
_lowerCAmelCase = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
_lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=2.5 ) , Write(_lowercase ) , Write(_lowercase ) )
self.add(_lowercase )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, rect in enumerate(_lowercase ):
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
cpu_target.move_to(_lowercase )
cpu_target.generate_target()
_lowerCAmelCase = 0.46 / 4
_lowerCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_lowercase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowercase , buff=0.0 )
cpu_targs.append(_lowercase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowercase ) )
second_animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
self.play(*_lowercase )
self.play(*_lowercase )
self.wait()
| 5 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_speech_encoder_decoder'''] = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_speech_encoder_decoder'''] = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 95 |
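The row above defers heavy imports through transformers' _LazyModule. A minimal sketch of the same idea using plain PEP 562 module-level __getattr__ (mapping contents illustrative, not HF's implementation):
import importlib

_LAZY_ATTRS = {"SpeechEncoderDecoderModel": ".modeling_speech_encoder_decoder"}

def __getattr__(name):
    # Import the submodule only when one of its attributes is first requested.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")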
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("""google.colab""")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """An interactive bullet menu driven by arrow keys, number keys and enter."""
    def __init__( self , prompt = None , choices = [] ):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = """*"""
        else:
            self.arrow_char = """➔ """
    def write_choice( self , index , end = "" ):
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end )
        else:
            forceWrite(self.choices[index] , end )
    def print_choice( self , index ):
        if index == self.position:
            forceWrite(F' {self.arrow_char} ' )
            self.write_choice(index )
        else:
            forceWrite(F' {self.choices[index]}' )
        reset_cursor()
    def move_direction( self , direction , num_spaces = 1 ):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP["""up"""] )
    def move_up( self ):
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP["""down"""] )
    def move_down( self ):
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP["""newline"""] )
    def select( self ):
        move_cursor(len(self.choices ) - self.position , """DOWN""" )
        return self.position
    @input.mark(KEYMAP["""interrupt"""] )
    def interrupt( self ):
        move_cursor(len(self.choices ) - self.position , """DOWN""" )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row( self ):
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return
    def run( self , default_choice = 0 ):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , """\n""" )
            if in_colab:
                forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" )
            else:
                forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite("""\n""" )
        move_cursor(len(self.choices ) - self.position , """UP""" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , """UP""" )
                        clear_line()
                    self.write_choice(choice , """\n""" )
                    return choice
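# Hypothetical usage (requires an interactive terminal; prompt and choice strings
# are illustrative, not from the source):
#
#   menu = BulletMenu("Which compute environment are you using?", ["This machine", "AWS (Amazon SageMaker)"])
#   selected_index = menu.run(default_choice=0)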
| 5 | 0 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = '.'
if __name__ == "__main__":
__lowerCamelCase = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
__lowerCamelCase = []
__lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
__lowerCamelCase = line.strip()
__lowerCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__lowerCamelCase = '\n'.join(non_existent_paths)
raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 96 |
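A small illustrative helper (not in the source) that reports the first out-of-order pair, which makes the alphabetical-order failure above easier to act on:
def first_unsorted_pair(paths):
    # Return the first adjacent pair violating sorted order, or None if sorted.
    for previous, current in zip(paths, paths[1:]):
        if previous > current:
            return previous, current
    return None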
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename ):
    # Map each non-empty line number to the first word on that line (id -> label).
    result = {}
    with open(filename , "r" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Any , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any] , __lowerCamelCase :List[str] ):
for attribute in key.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = shape_pointer.shape
# let's reduce dimension
_lowerCAmelCase = value[0]
else:
_lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_lowerCAmelCase = value
elif weight_type == "weight_g":
_lowerCAmelCase = value
elif weight_type == "weight_v":
_lowerCAmelCase = value
elif weight_type == "bias":
_lowerCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = value
else:
_lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Dict , __lowerCamelCase :List[Any] , __lowerCamelCase :int ):
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = """.""".join([key, hf_param_name] )
else:
_lowerCAmelCase = key
_lowerCAmelCase = value if """lm_head""" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def A (__lowerCamelCase :Any , __lowerCamelCase :int , __lowerCamelCase :List[str]=None , __lowerCamelCase :List[Any]=None ):
_lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
_lowerCAmelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase = True
if "*" in mapped_key:
_lowerCAmelCase = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
_lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase = """weight_v"""
elif "bias" in name:
_lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase = """weight"""
else:
_lowerCAmelCase = None
if hf_dict is not None:
rename_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return is_used
return is_used
def A (__lowerCamelCase :Any , __lowerCamelCase :Dict , __lowerCamelCase :Dict ):
_lowerCAmelCase = []
_lowerCAmelCase = fairseq_model.state_dict()
_lowerCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
_lowerCAmelCase = True
else:
_lowerCAmelCase = load_wavaveca_layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def A (__lowerCamelCase :Tuple , __lowerCamelCase :Optional[int] , __lowerCamelCase :Any , __lowerCamelCase :List[Any] , __lowerCamelCase :List[Any] ):
_lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase = name.split(""".""" )
_lowerCAmelCase = int(items[0] )
_lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def A (__lowerCamelCase :List[str] , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any]=None , __lowerCamelCase :Union[str, Any]=None , __lowerCamelCase :str=True , __lowerCamelCase :str=False ):
if config_path is not None:
_lowerCAmelCase = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaConfig()
if is_seq_class:
_lowerCAmelCase = read_txt_into_dict(__lowerCamelCase )
_lowerCAmelCase = idalabel
_lowerCAmelCase = WavaVecaForSequenceClassification(__lowerCamelCase )
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
_lowerCAmelCase = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase = target_dict.pad_index
_lowerCAmelCase = target_dict.bos_index
_lowerCAmelCase = target_dict.eos_index
_lowerCAmelCase = len(target_dict.symbols )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase = 0
_lowerCAmelCase = 1
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
_lowerCAmelCase = True if config.feat_extract_norm == """layer""" else False
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
_lowerCAmelCase = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
_lowerCAmelCase = WavaVecaForCTC(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase = argparse.Namespace(task="""audio_pretraining""" )
_lowerCAmelCase = fairseq.tasks.setup_task(__lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
_lowerCAmelCase = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_lowercase = parser.parse_args()
_lowercase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
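    # Hypothetical invocation sketch (paths below are placeholders, not from the
    # source). Converting a pretrained (not fine-tuned) fairseq checkpoint:
    #
    #   python convert_checkpoint.py \
    #       --checkpoint_path ./wav2vec_small.pt \
    #       --pytorch_dump_folder_path ./wav2vec2-base \
    #       --not_finetuned
    #
    # Dropping --not_finetuned and passing --dict_path instead converts a
    # fine-tuned CTC checkpoint and also writes the tokenizer/processor files.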
| 5 | 0 |
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : str ) -> Optional[Any]:
# test for the above condition
self.test()
def _lowercase ( self : Tuple ) -> int:
lowercase_ = 0
lowercase_ = False
while not completed:
if counter == 1:
self.reset()
lowercase_ = self.advance()
if not self.does_advance(SCREAMING_SNAKE_CASE_ ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
lowercase_ , lowercase_ , lowercase_ = self.update(SCREAMING_SNAKE_CASE_ )
counter += 1
if counter > 1_0_0_0_0:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def _lowercase ( self : int ) -> str:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def _lowercase ( self : Optional[Any] ) -> Dict:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def _lowercase ( self : int ) -> Any:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int]=False ) -> str:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[int] ) -> Any:
super(SCREAMING_SNAKE_CASE_ , self ).__init__()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or len(SCREAMING_SNAKE_CASE_ ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
lowercase_ = token_ids
lowercase_ = len(self.token_ids )
lowercase_ = -1 # the index of the currently fulfilled step
lowercase_ = False
def _lowercase ( self : int ) -> List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE_ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : int ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE_ )}''' )
lowercase_ = False
lowercase_ = False
lowercase_ = False
if self.does_advance(SCREAMING_SNAKE_CASE_ ):
self.fulfilled_idx += 1
lowercase_ = True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase_ = True
lowercase_ = completed
else:
# failed to make progress.
lowercase_ = True
self.reset()
return stepped, completed, reset
def _lowercase ( self : str ) -> Dict:
lowercase_ = False
lowercase_ = 0
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
return self.seqlen - (self.fulfilled_idx + 1)
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=False ) -> Optional[Any]:
lowercase_ = PhrasalConstraint(self.token_ids )
if stateful:
lowercase_ = self.seqlen
lowercase_ = self.fulfilled_idx
lowercase_ = self.completed
return new_constraint
class lowercase__:
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : List[List[int]] , SCREAMING_SNAKE_CASE_ : Optional[int]=True ) -> Dict:
lowercase_ = max([len(SCREAMING_SNAKE_CASE_ ) for one in nested_token_ids] )
lowercase_ = {}
for token_ids in nested_token_ids:
lowercase_ = root
for tidx, token_id in enumerate(SCREAMING_SNAKE_CASE_ ):
if token_id not in level:
lowercase_ = {}
lowercase_ = level[token_id]
if no_subsets and self.has_subsets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f''' {nested_token_ids}.''' )
lowercase_ = root
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
lowercase_ = self.trie
for current_token in current_seq:
lowercase_ = start[current_token]
lowercase_ = list(start.keys() )
return next_tokens
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
lowercase_ = self.next_tokens(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 0
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
lowercase_ = list(root.values() )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return 1
else:
return sum([self.count_leaves(SCREAMING_SNAKE_CASE_ ) for nn in next_nodes] )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str ) -> Tuple:
lowercase_ = self.count_leaves(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) != leaf_count
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : List[List[int]] ) -> List[str]:
super(SCREAMING_SNAKE_CASE_ , self ).__init__()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or len(SCREAMING_SNAKE_CASE_ ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
lowercase_ = DisjunctiveTrie(SCREAMING_SNAKE_CASE_ )
lowercase_ = nested_token_ids
lowercase_ = self.trie.max_height
lowercase_ = []
lowercase_ = False
def _lowercase ( self : Dict ) -> int:
lowercase_ = self.trie.next_tokens(self.current_seq )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return None
else:
return token_list
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : int ) -> Dict:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE_ )}''' )
lowercase_ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE_ )}''' )
lowercase_ = False
lowercase_ = False
lowercase_ = False
if self.does_advance(SCREAMING_SNAKE_CASE_ ):
self.current_seq.append(SCREAMING_SNAKE_CASE_ )
lowercase_ = True
else:
lowercase_ = True
self.reset()
lowercase_ = self.trie.reached_leaf(self.current_seq )
lowercase_ = completed
return stepped, completed, reset
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ = False
lowercase_ = []
def _lowercase ( self : Any ) -> int:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ) -> Dict:
lowercase_ = DisjunctiveConstraint(self.token_ids )
if stateful:
lowercase_ = self.seqlen
lowercase_ = self.current_seq
lowercase_ = self.completed
return new_constraint
class lowercase__:
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : List[Constraint] ) -> Tuple:
lowercase_ = constraints
# max # of steps required to fulfill a given constraint
lowercase_ = max([c.seqlen for c in constraints] )
lowercase_ = len(SCREAMING_SNAKE_CASE_ )
lowercase_ = False
self.init_state()
def _lowercase ( self : str ) -> Tuple:
lowercase_ = []
lowercase_ = None
lowercase_ = [constraint.copy(stateful=SCREAMING_SNAKE_CASE_ ) for constraint in self.constraints]
def _lowercase ( self : Optional[Any] ) -> Any:
lowercase_ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def _lowercase ( self : int ) -> Tuple:
lowercase_ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowercase_ = constraint.advance()
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.append(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.extend(SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = self.inprogress_constraint.advance()
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.append(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.extend(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return None
else:
return token_list
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] ) -> Optional[int]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowercase_ , lowercase_ = self.add(SCREAMING_SNAKE_CASE_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
lowercase_ , lowercase_ = False, False
if self.completed:
lowercase_ = True
lowercase_ = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
            # job, simply update the state
lowercase_ , lowercase_ , lowercase_ = self.inprogress_constraint.update(SCREAMING_SNAKE_CASE_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=SCREAMING_SNAKE_CASE_ ) )
lowercase_ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowercase_ = None
if len(self.pending_constraints ) == 0:
# we're done!
lowercase_ = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(SCREAMING_SNAKE_CASE_ ):
lowercase_ , lowercase_ , lowercase_ = pending_constraint.update(SCREAMING_SNAKE_CASE_ )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(SCREAMING_SNAKE_CASE_ )
lowercase_ = None
if not complete and stepped:
lowercase_ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowercase_ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowercase_ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : List[Any]=True ) -> str:
        lowercase_ = ConstraintListState(self.constraints )  # we actually never touch self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
lowercase_ = [
constraint.copy(stateful=SCREAMING_SNAKE_CASE_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowercase_ = self.inprogress_constraint.copy(stateful=SCREAMING_SNAKE_CASE_ )
lowercase_ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
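# Illustrative usage sketch in comment form (class and method names follow the
# originals in transformers.generation.beam_constraints, which this obfuscated
# dump mirrors; the token ids are made up):
#
#     constraint = PhrasalConstraint([5, 9, 2])    # force the phrase 5 -> 9 -> 2
#     constraint.does_advance(5)                   # True: 5 is the next required token
#     stepped, completed, reset = constraint.update(5)
#     constraint.remaining()                       # 2 tokens still needed
#
# Feeding a token that breaks the phrase mid-way triggers reset(), and the
# constraint starts over from the beginning of the phrase.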
| 97 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[str] = '''decision_transformer'''
_lowercase : Optional[Any] = ['''past_key_values''']
_lowercase : str = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowercase=17 , _lowercase=4 , _lowercase=128 , _lowercase=4_096 , _lowercase=True , _lowercase=1 , _lowercase=1_024 , _lowercase=3 , _lowercase=1 , _lowercase=None , _lowercase="relu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1e-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=50_256 , _lowercase=50_256 , _lowercase=False , _lowercase=False , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = state_dim
_lowerCAmelCase = act_dim
_lowerCAmelCase = hidden_size
_lowerCAmelCase = max_ep_len
_lowerCAmelCase = action_tanh
_lowerCAmelCase = vocab_size
_lowerCAmelCase = n_positions
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = n_inner
_lowerCAmelCase = activation_function
_lowerCAmelCase = resid_pdrop
_lowerCAmelCase = embd_pdrop
_lowerCAmelCase = attn_pdrop
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scale_attn_weights
_lowerCAmelCase = use_cache
_lowerCAmelCase = scale_attn_by_inverse_layer_idx
_lowerCAmelCase = reorder_and_upcast_attn
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
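# Minimal instantiation sketch in comment form (DecisionTransformerConfig is the
# presumed original name of the class obfuscated above as UpperCAmelCase_):
#
#     config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
#     config.max_position_embeddings   # -> 1024, routed to n_positions via attribute_map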
| 5 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Tuple = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = 'pegasus'
_snake_case : str = ['past_key_values']
_snake_case : List[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : List[str] , lowerCAmelCase__ : int=50265 , lowerCAmelCase__ : int=1024 , lowerCAmelCase__ : Optional[Any]=12 , lowerCAmelCase__ : Union[str, Any]=4096 , lowerCAmelCase__ : List[Any]=16 , lowerCAmelCase__ : Dict=12 , lowerCAmelCase__ : Optional[Any]=4096 , lowerCAmelCase__ : List[str]=16 , lowerCAmelCase__ : Dict=0.0 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : Optional[Any]=1024 , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Dict=0.0 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : Optional[int]=0 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : int=1 , lowerCAmelCase__ : Tuple=1 , **lowerCAmelCase__ : str , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = encoder_layers
_UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
@property
def snake_case__ ( self : Dict ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def snake_case__ ( self : str ) -> int:
'''simple docstring'''
return self.d_model
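# The two properties above expose PEGASUS-specific fields under generic names.
# A quick sketch in comment form (PegasusConfig is the presumed original class name):
#
#     config = PegasusConfig()
#     config.num_attention_heads   # -> 16, i.e. config.encoder_attention_heads
#     config.hidden_size           # -> 1024, i.e. config.d_model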
| 98 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
_lowercase = None
_lowercase = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
_lowercase = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def A (__lowerCamelCase :int , __lowerCamelCase :Optional[Any]=1 , __lowerCamelCase :List[Any]=256 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
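# Worked example: for the 7B model (dim = 4096) with the default multiplier 1 and
# multiple_of 256, int(8 * 4096 / 3) = 10922, rounded up to the next multiple of
# 256 -> 11008, which matches the "7B" entry in the intermediate-size table above.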
def A (__lowerCamelCase :Any ):
with open(__lowerCamelCase , """r""" ) as f:
return json.load(__lowerCamelCase )
def A (__lowerCamelCase :List[Any] , __lowerCamelCase :int ):
with open(__lowerCamelCase , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Optional[Any] , __lowerCamelCase :Tuple=True ):
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """tmp""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = read_json(os.path.join(__lowerCamelCase , """params.json""" ) )
_lowerCAmelCase = NUM_SHARDS[model_size]
_lowerCAmelCase = params["""n_layers"""]
_lowerCAmelCase = params["""n_heads"""]
_lowerCAmelCase = n_heads // num_shards
_lowerCAmelCase = params["""dim"""]
_lowerCAmelCase = dim // n_heads
_lowerCAmelCase = 10_000.0
_lowerCAmelCase = 1.0 / (base ** (torch.arange(0 , __lowerCamelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase = params["""n_kv_heads"""] # for GQA / MQA
_lowerCAmelCase = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase = n_heads
_lowerCAmelCase = n_heads_per_shard
_lowerCAmelCase = dim
# permute for sliced rotary
def permute(__lowerCamelCase :Optional[int] , __lowerCamelCase :str=n_heads , __lowerCamelCase :str=dim , __lowerCamelCase :List[Any]=dim ):
return w.view(__lowerCamelCase , dima // n_heads // 2 , 2 , __lowerCamelCase ).transpose(1 , 2 ).reshape(__lowerCamelCase , __lowerCamelCase )
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase = torch.load(os.path.join(__lowerCamelCase , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
_lowerCAmelCase = [
torch.load(os.path.join(__lowerCamelCase , f'consolidated.{i:02d}.pth' ) , map_location="""cpu""" )
for i in range(__lowerCamelCase )
]
_lowerCAmelCase = 0
_lowerCAmelCase = {"""weight_map""": {}}
for layer_i in range(__lowerCamelCase ):
_lowerCAmelCase = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) )
_lowerCAmelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
_lowerCAmelCase = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(__lowerCamelCase )] , dim=1 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__lowerCamelCase )] , dim=0 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__lowerCamelCase )] , dim=1 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__lowerCamelCase )] , dim=0 )
_lowerCAmelCase = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
_lowerCAmelCase = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
_lowerCAmelCase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__lowerCamelCase )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__lowerCamelCase )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
# Write configs
_lowerCAmelCase = {"""total_size""": param_count * 2}
write_json(__lowerCamelCase , os.path.join(__lowerCamelCase , """pytorch_model.bin.index.json""" ) )
_lowerCAmelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_lowerCAmelCase = params["""multiple_of"""] if """multiple_of""" in params else 256
_lowerCAmelCase = LlamaConfig(
hidden_size=__lowerCamelCase , intermediate_size=compute_intermediate_size(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=__lowerCamelCase , )
config.save_pretrained(__lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
_lowerCAmelCase = LlamaForCausalLM.from_pretrained(__lowerCamelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=__lowerCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__lowerCamelCase , safe_serialization=__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Union[str, Any] ):
# Initialize the tokenizer based on the `spm` model
_lowerCAmelCase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase = tokenizer_class(__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
def A ():
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=__lowerCamelCase , help="""Whether or not to save using `safetensors`.""" )
_lowerCAmelCase = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , __lowerCamelCase )
if __name__ == "__main__":
main()
| 5 | 0 |
from __future__ import annotations
def a (lowerCAmelCase__ , lowerCAmelCase__ = None ):
__a = word_bank or []
# create a table
__a = len(lowerCAmelCase__ ) + 1
__a = []
for _ in range(lowerCAmelCase__ ):
table.append([] )
# seed value
__a = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCAmelCase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCAmelCase__ )] == word:
__a = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(lowerCAmelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCAmelCase__ )]:
combination.reverse()
return table[len(lowerCAmelCase__ )]
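# Quick trace in comment form for all_construct("ab", ["a", "b", "ab"]):
#   table[0] = [[]]                         (the empty prefix)
#   i = 0: "a" matches  -> table[1] = [["a"]];  "ab" matches -> table[2] = [["ab"]]
#   i = 1: "b" matches  -> table[2] = [["ab"], ["b", "a"]]
# each combination is then reversed in place, so the result is [["ab"], ["a", "b"]].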
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 99 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = (DDPMScheduler,)
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowercase )
return config
def _lowercase ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def _lowercase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowercase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowercase ):
if i == len(_lowercase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowercase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowercase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowercase )
with self.assertRaises(_lowercase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _lowercase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_lowercase )
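# The two full-loop tests above exercise the standard DDPM reverse process, which
# in sketch form (argument order as in diffusers' DDPMScheduler.step) is:
#
#     for t in reversed(range(num_train_timesteps)):
#         noise_pred = model(sample, t)
#         sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample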
| 5 | 0 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = GPTaTokenizer
lowerCamelCase__ : Any = GPTaTokenizerFast
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : int = {"""add_prefix_space""": True}
lowerCamelCase__ : List[str] = False
def lowercase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
SCREAMING_SNAKE_CASE__ = dict(zip(A_ , range(len(A_ ) ) ) )
SCREAMING_SNAKE_CASE__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE__ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A_ ) )
def lowercase_ ( self , **A_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **A_ )
def lowercase_ ( self , **A_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def lowercase_ ( self , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = '''lower newer'''
return input_text, output_text
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(A_ , add_prefix_space=A_ )
self.assertListEqual(A_ , A_ )
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def lowercase_ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer(add_prefix_space=A_ )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
# Testing tokenization
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(A_ , add_prefix_space=A_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer(add_prefix_space=A_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(A_ , add_prefix_space=A_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# Testing the unknown token
SCREAMING_SNAKE_CASE__ = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def lowercase_ ( self , *A_ , **A_ ):
'''simple docstring'''
pass
def lowercase_ ( self , A_=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
# Simple input
SCREAMING_SNAKE_CASE__ = '''This is a simple input'''
SCREAMING_SNAKE_CASE__ = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE__ = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE__ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='''max_length''' )
# Simple input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='''max_length''' )
# Simple input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='''max_length''' , )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='''max_length''' )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='''max_length''' )
# Pair input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='''max_length''' , )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
SCREAMING_SNAKE_CASE__ = '''This is a simple input'''
SCREAMING_SNAKE_CASE__ = ['''This is a simple input looooooooong''', '''This is a simple input''']
SCREAMING_SNAKE_CASE__ = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE__ = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
SCREAMING_SNAKE_CASE__ = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ = tokenizer(A_ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = tokenizer(*A_ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''$$$'''
SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ )
SCREAMING_SNAKE_CASE__ = '''This is a simple input'''
SCREAMING_SNAKE_CASE__ = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE__ = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ = tokenizer(A_ )
SCREAMING_SNAKE_CASE__ = tokenizer(A_ )
self.assertEqual(out_s.input_ids[0] , A_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ = '''Encode this.'''
SCREAMING_SNAKE_CASE__ = '''This one too please.'''
SCREAMING_SNAKE_CASE__ = tokenizer.encode(A_ , add_special_tokens=A_ )
encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode_plus(
A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , )
SCREAMING_SNAKE_CASE__ = encoded_sequence_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(A_ ) , len(A_ ) )
SCREAMING_SNAKE_CASE__ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ )
]
SCREAMING_SNAKE_CASE__ = [x for x in filtered_sequence if x is not None]
self.assertEqual(A_ , A_ )
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=A_ )
SCREAMING_SNAKE_CASE__ = '''A photo of a cat'''
SCREAMING_SNAKE_CASE__ = tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''test_opt''' )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''./test_opt''' )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 2_50, 13_45, 9, 10, 47_58] )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=A_ )
SCREAMING_SNAKE_CASE__ = '''A photo of a cat'''
SCREAMING_SNAKE_CASE__ = tokenizer.encode(
A_ , )
# Same as above
self.assertEqual(A_ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=A_ )
SCREAMING_SNAKE_CASE__ = '''bos'''
SCREAMING_SNAKE_CASE__ = tokenizer.get_vocab()['''bos''']
SCREAMING_SNAKE_CASE__ = '''A photo of a cat'''
SCREAMING_SNAKE_CASE__ = tokenizer.encode(
A_ , )
# We changed the bos token
self.assertEqual(A_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''./tok''' )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(
A_ , )
self.assertEqual(A_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 100 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
_lowercase = logging.get_logger(__name__)
_lowercase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
_lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_SCREAMING_SNAKE_CASE )} )
_lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
_lowercase : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowercase : int = field(
default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
_lowercase : int = field(
default=6_4 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
_lowercase : int = field(
default=3_0 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
_lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
_lowercase : float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_lowercase : int = field(
        default=2_0 , metadata={'''help''': '''The total number of n-best predictions to generate.'''} )
_lowercase : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
_lowercase : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''train'''
_lowercase : Union[str, Any] = '''dev'''
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : SquadDataTrainingArguments
_lowercase : List[SquadFeatures]
_lowercase : Split
_lowercase : bool
def __init__( self , _lowercase , _lowercase , _lowercase = None , _lowercase = Split.train , _lowercase = False , _lowercase = None , _lowercase = "pt" , ):
"""simple docstring"""
_lowerCAmelCase = args
_lowerCAmelCase = is_language_sensitive
        _lowerCAmelCase = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(_lowercase , _lowercase ):
try:
_lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
_lowerCAmelCase = mode
# Load data features from cache or dataset file
_lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
_lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not args.overwrite_cache:
_lowerCAmelCase = time.time()
_lowerCAmelCase = torch.load(_lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCAmelCase = self.old_features["""features"""]
_lowerCAmelCase = self.old_features.get("""dataset""" , _lowercase )
_lowerCAmelCase = self.old_features.get("""examples""" , _lowercase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
_lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
_lowerCAmelCase , _lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
_lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , _lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.features[i]
_lowerCAmelCase = torch.tensor(feature.input_ids , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.attention_mask , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.token_type_ids , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.cls_index , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.p_mask , dtype=torch.float )
_lowerCAmelCase = torch.tensor(feature.is_impossible , dtype=torch.float )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCAmelCase = torch.tensor(feature.start_position , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
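# Standalone sketch of the cache-under-lock pattern used by the dataset above,
# written with plain names: the first process to take the lock builds and saves
# the features; concurrent processes block on the lock and then load the cached
# file instead of recomputing. The cache path and payload here are hypothetical.
def _load_or_build_cached(cache_file, build_fn):
    import os
    import time
    import torch
    from filelock import FileLock
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file):
            start = time.time()
            payload = torch.load(cache_file)
            print(f"loaded cache in {time.time() - start:.3f}s")
        else:
            payload = build_fn()
            torch.save(payload, cache_file)
    return payload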
| 5 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase = 42
_UpperCAmelCase = 42
class __lowercase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : list[list[Edge]] = [[] for _ in range(lowerCAmelCase__ )]
SCREAMING_SNAKE_CASE_ : List[str] = size
def __getitem__( self , lowerCAmelCase__ ):
"""simple docstring"""
return iter(self._graph[vertex] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._size
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(lowerCAmelCase__ , lowerCAmelCase__ ) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = deque([start_vertex] )
SCREAMING_SNAKE_CASE_ : list[int | None] = [None] * self.size
SCREAMING_SNAKE_CASE_ : int = 0
while queue:
SCREAMING_SNAKE_CASE_ : Tuple = queue.popleft()
SCREAMING_SNAKE_CASE_ : Dict = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
SCREAMING_SNAKE_CASE_ : Dict = current_distance + edge.weight
SCREAMING_SNAKE_CASE_ : List[Any] = distances[edge.destination_vertex]
if (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and new_distance >= dest_vertex_distance
):
continue
SCREAMING_SNAKE_CASE_ : Optional[int] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
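# Minimal standalone sketch of the 0-1 BFS technique implemented above, with
# plain names (it reuses the `deque` import at the top of this file): 0-weight
# edges go to the front of the deque and 1-weight edges to the back, so
# vertices are settled in nondecreasing distance order in O(V + E) overall.
def zero_one_bfs(adjacency: list[list[tuple[int, int]]], start: int) -> list[int | None]:
    distances: list[int | None] = [None] * len(adjacency)
    distances[start] = 0
    dq = deque([start])
    while dq:
        vertex = dq.popleft()
        for destination, weight in adjacency[vertex]:
            new_distance = distances[vertex] + weight
            if distances[destination] is not None and new_distance >= distances[destination]:
                continue
            distances[destination] = new_distance
            if weight == 0:
                dq.appendleft(destination)
            else:
                dq.append(destination)
    return distances
# zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], start=0) -> [0, 0, 1]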
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''dpr'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=0 , _lowercase="absolute" , _lowercase = 0 , **_lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowercase , **_lowercase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = projection_dim
_lowerCAmelCase = position_embedding_type
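# Hedged usage sketch: assuming the class above corresponds to the public
# `DPRConfig` in transformers (an assumption, since names in this file are
# rewritten), a config is built from the defaults and individual fields can be
# overridden; a nonzero `projection_dim` adds a projection on the pooled output.
def _dpr_config_example():
    from transformers import DPRConfig  # assumed public name
    config = DPRConfig(projection_dim=128)
    return config.projection_dim  # -> 128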
| 5 | 0 |
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE = 10 , SCREAMING_SNAKE_CASE = 1000 , SCREAMING_SNAKE_CASE = True ):
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("""Invalid value for min_val or max_val (min_value < max_value)""" )
return min_val if option else max_val
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return int((number_a + number_a) / 2 )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    ), 'argument values must be of type "int"'
    if lower > higher:
        raise ValueError("""argument values for lower and higher must satisfy lower < higher""" )
    if not lower < to_guess < higher:
        raise ValueError(
            """guess value must be within the range of the lower and higher values""" )
    def answer(number ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("""started...""" )
UpperCamelCase : Union[str, Any] = lower
UpperCamelCase : Any = higher
UpperCamelCase : List[str] = []
while True:
UpperCamelCase : Optional[int] = get_avg(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
last_numbers.append(SCREAMING_SNAKE_CASE )
if answer(SCREAMING_SNAKE_CASE ) == "low":
UpperCamelCase : List[Any] = number
elif answer(SCREAMING_SNAKE_CASE ) == "high":
UpperCamelCase : Optional[Any] = number
else:
break
print(f"""guess the number : {last_numbers[-1]}""" )
print(f"""details : {last_numbers!s}""" )
def UpperCamelCase ():
UpperCamelCase : List[str] = int(input("""Enter lower value : """ ).strip() )
UpperCamelCase : Dict = int(input("""Enter high value : """ ).strip() )
UpperCamelCase : List[str] = int(input("""Enter value to guess : """ ).strip() )
guess_the_number(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 102 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_lowercase = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_lowercase = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_lowercase = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _lowercase ( self ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _lowercase ( self , _lowercase , _lowercase , _lowercase=None , _lowercase="uniform_average" , _lowercase=True ):
"""simple docstring"""
_lowerCAmelCase = mean_squared_error(
_lowercase , _lowercase , sample_weight=_lowercase , multioutput=_lowercase , squared=_lowercase )
return {"mse": mse}
| 5 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
snake_case = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
snake_case = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
snake_case = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : int = CHRF.CHAR_ORDER , __lowerCamelCase : int = CHRF.WORD_ORDER , __lowerCamelCase : int = CHRF.BETA , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , ):
"""simple docstring"""
_snake_case = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_snake_case = [[refs[i] for refs in references] for i in range(__lowerCamelCase )]
_snake_case = CHRF(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_snake_case = sb_chrf.corpus_score(__lowerCamelCase , __lowerCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 103 |
'''simple docstring'''
def A ():
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def A (__lowerCamelCase :List[Any] ):
_lowerCAmelCase = 1
_lowerCAmelCase = 2
while i * i <= n:
_lowerCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
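# Standalone cross-check of the factorization-based divisor count above: the
# product of (multiplicity + 1) over the prime factors must match a brute-force
# count for small n. Plain names, since the defs in this file are rewritten.
def _check_divisor_count(limit: int = 200) -> bool:
    def count_divisors(n: int) -> int:
        count, i = 1, 2
        while i * i <= n:
            multiplicity = 0
            while n % i == 0:
                n //= i
                multiplicity += 1
            count *= multiplicity + 1
            i += 1
        return count * 2 if n > 1 else count
    return all(
        count_divisors(n) == sum(1 for d in range(1, n + 1) if n % d == 0)
        for n in range(1, limit)
    )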
def A ():
return next(i for i in triangle_number_generator() if count_divisors(__lowerCamelCase ) > 500 )
if __name__ == "__main__":
print(solution())
| 5 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
UpperCamelCase = TypeVar("""KT""")
UpperCamelCase = TypeVar("""VT""")
class UpperCamelCase__ ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ = "root" , SCREAMING_SNAKE_CASE__ = None ) -> List[str]:
A__ = key
A__ = value
A__ = []
def __repr__( self ) -> str:
return f"""Node({self.key}: {self.value})"""
@property
def snake_case__ ( self ) -> int:
return len(self.forward )
class UpperCamelCase__ ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ = 0.5 , SCREAMING_SNAKE_CASE__ = 16 ) -> Any:
A__ = Node[KT, VT]()
A__ = 0
A__ = p
A__ = max_level
def __str__( self ) -> str:
A__ = list(self )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return f"""SkipList(level={self.level})"""
        A__ = max((len(str(item ) ) for item in items) , default=4 )
A__ = max(SCREAMING_SNAKE_CASE__ , 4 ) + 4
A__ = self.head
A__ = []
A__ = node.forward.copy()
lines.append(f"""[{node.key}]""".ljust(SCREAMING_SNAKE_CASE__ , "-" ) + "* " * len(SCREAMING_SNAKE_CASE__ ) )
lines.append(" " * label_size + "| " * len(SCREAMING_SNAKE_CASE__ ) )
while len(node.forward ) != 0:
A__ = node.forward[0]
lines.append(
f"""[{node.key}]""".ljust(SCREAMING_SNAKE_CASE__ , "-" )
+ " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
lines.append(" " * label_size + "| " * len(SCREAMING_SNAKE_CASE__ ) )
A__ = node.forward
lines.append("None".ljust(SCREAMING_SNAKE_CASE__ ) + "* " * len(SCREAMING_SNAKE_CASE__ ) )
return f"""SkipList(level={self.level})\n""" + "\n".join(SCREAMING_SNAKE_CASE__ )
def __iter__( self ) -> List[Any]:
A__ = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
A__ = node.forward[0]
def snake_case__ ( self ) -> int:
A__ = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
A__ = []
A__ = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
A__ = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(SCREAMING_SNAKE_CASE__ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
A__ , A__ = self._locate_node(SCREAMING_SNAKE_CASE__ )
if node is not None:
for i, update_node in enumerate(SCREAMING_SNAKE_CASE__ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
A__ = node.forward[i]
else:
A__ = update_node.forward[:i]
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
A__ , A__ = self._locate_node(SCREAMING_SNAKE_CASE__ )
if node is not None:
A__ = value
else:
A__ = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , SCREAMING_SNAKE_CASE__ ):
update_vector.append(self.head )
A__ = level
A__ = Node(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(SCREAMING_SNAKE_CASE__ )
else:
A__ = new_node
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> VT | None:
A__ , A__ = self._locate_node(SCREAMING_SNAKE_CASE__ )
if node is not None:
return node.value
return None
def _lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ = SkipList()
skip_list.insert("Key1", 3 )
skip_list.insert("Key2", 12 )
skip_list.insert("Key3", 41 )
skip_list.insert("Key4", -19 )
A__ = skip_list.head
A__ = {}
while node.level != 0:
A__ = node.forward[0]
A__ = node.value
assert len(UpperCAmelCase_ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def _lowerCamelCase ( ) -> str:
"""simple docstring"""
A__ = SkipList()
skip_list.insert("Key1", 10 )
skip_list.insert("Key1", 12 )
skip_list.insert("Key5", 7 )
skip_list.insert("Key7", 10 )
skip_list.insert("Key10", 5 )
skip_list.insert("Key7", 7 )
skip_list.insert("Key5", 5 )
skip_list.insert("Key10", 10 )
A__ = skip_list.head
A__ = {}
while node.level != 0:
A__ = node.forward[0]
A__ = node.value
if len(UpperCAmelCase_ ) != 4:
print()
assert len(UpperCAmelCase_ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def _lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
A__ = SkipList()
assert skip_list.find("Some key" ) is None
def _lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
A__ = SkipList()
skip_list.insert("Key2", 20 )
assert skip_list.find("Key2" ) == 20
skip_list.insert("Some Key", 10 )
skip_list.insert("Key2", 8 )
skip_list.insert("V", 13 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 10
assert skip_list.find("V" ) == 13
def _lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ = SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def _lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ = SkipList()
skip_list.insert("Key1", 12 )
skip_list.insert("V", 13 )
skip_list.insert("X", 14 )
skip_list.insert("Key2", 15 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def _lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ = SkipList()
skip_list.insert("Key1", 12 )
skip_list.insert("V", 13 )
skip_list.insert("X", 14 )
skip_list.insert("Key2", 15 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 14
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def _lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
A__ = SkipList()
skip_list.insert("Key1", 12 )
skip_list.insert("V", 13 )
skip_list.insert("X", 142 )
skip_list.insert("Key2", 15 )
skip_list.delete("X" )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def _lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:] ) )
A__ = SkipList()
for i in range(10 ):
        skip_list.insert(i, i )
assert is_sorted(list(UpperCAmelCase_ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(UpperCAmelCase_ ) )
skip_list.insert(-12, -12 )
skip_list.insert(77, 77 )
assert is_sorted(list(UpperCAmelCase_ ) )
def _lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def _lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ = SkipList()
skip_list.insert(2, "2" )
skip_list.insert(4, "4" )
skip_list.insert(6, "4" )
skip_list.insert(4, "5" )
skip_list.insert(8, "4" )
skip_list.insert(9, "4" )
skip_list.delete(4 )
print(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 104 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase = logging.get_logger(__name__)
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" , FutureWarning , )
super().__init__(*_lowercase , **_lowercase )
| 5 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCamelCase__ : str = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' ,FutureWarning ,)
super().__init__(*snake_case__ ,**snake_case__ )
| 105 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 5 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 106 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
_lowercase = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
_lowercase = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
_lowercase = BeautifulSoup(res.text, """html.parser""")
_lowercase = list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 5 | 0 |
'''simple docstring'''
class lowercase_ :
"""simple docstring"""
def __init__( self : Tuple, UpperCamelCase__ : int ) -> None:
_A = size
_A = [0] * size
_A = [0] * size
@staticmethod
def __UpperCAmelCase ( UpperCamelCase__ : int ) -> int:
return index | (index + 1)
@staticmethod
def __UpperCAmelCase ( UpperCamelCase__ : int ) -> int:
return (index & (index + 1)) - 1
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : int, UpperCamelCase__ : int ) -> None:
_A = value
while index < self.size:
_A = self.get_prev(UpperCamelCase__ ) + 1
if current_left_border == index:
_A = value
else:
_A = max(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
_A = self.get_next(UpperCamelCase__ )
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : int, UpperCamelCase__ : int ) -> int:
right -= 1 # Because of right is exclusive
_A = 0
while left <= right:
_A = self.get_prev(UpperCamelCase__ )
if left <= current_left:
_A = max(UpperCamelCase__, self.tree[right] )
_A = current_left
else:
_A = max(UpperCamelCase__, self.arr[right] )
right -= 1
return result
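# Standalone sketch of the index arithmetic used above, with plain names:
# get_next(i) = i | (i + 1) is the next node an update must touch, and
# get_prev(i) = (i & (i + 1)) - 1 sits just left of the block that tree[i]
# covers, so tree[i] is responsible for arr[(i & (i + 1)) .. i].
def _show_fenwick_ranges(size: int = 8) -> None:
    for i in range(size):
        left = i & (i + 1)  # == get_prev(i) + 1, left border of tree[i]'s range
        nxt = i | (i + 1)   # next index touched when arr[i] changes
        print(f"tree[{i}] covers arr[{left}..{i}], next update index: {nxt}")
# e.g. tree[3] covers arr[0..3]; tree[5] covers arr[4..5]; tree[7] covers arr[0..7]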
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_lowercase = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def A ():
_lowerCAmelCase = Github(os.environ["""GITHUB_TOKEN"""] )
_lowerCAmelCase = g.get_repo("""huggingface/diffusers""" )
_lowerCAmelCase = repo.get_issues(state="""open""" )
for issue in open_issues:
        _lowerCAmelCase = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
_lowerCAmelCase = comments[0] if len(__lowerCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 5 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> float:
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> float:
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
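# Usage sketch with plain names (the two defs above share a rewritten name, so
# the pressure formula is restated here): PV = nRT gives P = nRT / V; one mole
# at 273.15 K in the molar volume 0.022414 m^3 comes out near 101325 Pa.
def _pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
# _pressure_of_gas_system(1.0, 273.15, 0.022414) -> ~101325.0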
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 108 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 5 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'glpn'
def __init__( self : Dict ,lowerCamelCase : Tuple=3 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Dict=[2, 2, 2, 2] ,lowerCamelCase : Union[str, Any]=[8, 4, 2, 1] ,lowerCamelCase : Any=[32, 64, 160, 256] ,lowerCamelCase : Tuple=[7, 3, 3, 3] ,lowerCamelCase : List[str]=[4, 2, 2, 2] ,lowerCamelCase : str=[1, 2, 5, 8] ,lowerCamelCase : Union[str, Any]=[4, 4, 4, 4] ,lowerCamelCase : Any="gelu" ,lowerCamelCase : int=0.0 ,lowerCamelCase : str=0.0 ,lowerCamelCase : Dict=0.02 ,lowerCamelCase : Dict=0.1 ,lowerCamelCase : Union[str, Any]=1E-6 ,lowerCamelCase : Dict=64 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : int=-1 ,**lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = num_encoder_blocks
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = sr_ratios
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = patch_sizes
__SCREAMING_SNAKE_CASE = strides
__SCREAMING_SNAKE_CASE = mlp_ratios
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = decoder_hidden_size
__SCREAMING_SNAKE_CASE = max_depth
__SCREAMING_SNAKE_CASE = head_in_index
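# Hedged usage sketch: assuming the class above is the public `GLPNConfig`
# (names in this file are rewritten), fields such as the decoder width and the
# maximum predicted depth can be overridden at construction time.
def _glpn_config_example():
    from transformers import GLPNConfig  # assumed public name
    config = GLPNConfig(decoder_hidden_size=32, max_depth=5)
    return config.decoder_hidden_size, config.max_depth  # -> (32, 5)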
| 109 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 0 |
"""simple docstring"""
from __future__ import annotations
import queue
class a :
def __init__( self , UpperCamelCase_ ):
UpperCAmelCase__ : int = data
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Optional[int] = None
def lowerCamelCase ( ):
print('\n********Press N to stop entering at any point of time********\n' )
UpperCAmelCase__ : int = input('Enter the value of the root node: ' ).strip().lower()
UpperCAmelCase__ : queue.Queue = queue.Queue()
UpperCAmelCase__ : Dict = TreeNode(int(_snake_case ) )
q.put(_snake_case )
while not q.empty():
UpperCAmelCase__ : Tuple = q.get()
UpperCAmelCase__ : List[Any] = F'''Enter the left node of {node_found.data}: '''
UpperCAmelCase__ : Dict = input(_snake_case ).strip().lower() or 'n'
if check == "n":
return tree_node
UpperCAmelCase__ : Optional[int] = TreeNode(int(_snake_case ) )
UpperCAmelCase__ : Tuple = left_node
q.put(_snake_case )
UpperCAmelCase__ : int = F'''Enter the right node of {node_found.data}: '''
UpperCAmelCase__ : int = input(_snake_case ).strip().lower() or 'n'
if check == "n":
return tree_node
UpperCAmelCase__ : int = TreeNode(int(_snake_case ) )
UpperCAmelCase__ : Any = right_node
q.put(_snake_case )
raise
def lowerCamelCase ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
print(node.data ,end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
in_order(node.left )
print(node.data ,end=',' )
in_order(node.right )
def lowerCamelCase ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data ,end=',' )
def lowerCamelCase ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
UpperCAmelCase__ : queue.Queue = queue.Queue()
q.put(_snake_case )
while not q.empty():
UpperCAmelCase__ : int = q.get()
print(node_dequeued.data ,end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
UpperCAmelCase__ : queue.Queue = queue.Queue()
q.put(_snake_case )
while not q.empty():
UpperCAmelCase__ : Optional[Any] = []
while not q.empty():
UpperCAmelCase__ : Union[str, Any] = q.get()
print(node_dequeued.data ,end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_snake_case )
def lowerCamelCase ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
UpperCAmelCase__ : list[TreeNode] = []
UpperCAmelCase__ : Optional[int] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data ,end=',' )
stack.append(_snake_case )
UpperCAmelCase__ : Optional[int] = n.left
# end of while means current node doesn't have left child
UpperCAmelCase__ : Union[str, Any] = stack.pop()
# start to traverse its right child
UpperCAmelCase__ : Dict = n.right
def lowerCamelCase ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
UpperCAmelCase__ : list[TreeNode] = []
UpperCAmelCase__ : Optional[Any] = node
while n or stack:
while n:
stack.append(_snake_case )
UpperCAmelCase__ : int = n.left
UpperCAmelCase__ : Tuple = stack.pop()
print(n.data ,end=',' )
UpperCAmelCase__ : Tuple = n.right
def lowerCamelCase ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
UpperCAmelCase__ , UpperCAmelCase__ : str = [], []
UpperCAmelCase__ : Union[str, Any] = node
stacka.append(_snake_case )
while stacka: # to find the reversed order of post order, store it in stack2
UpperCAmelCase__ : Tuple = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_snake_case )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data ,end=',' )
def lowerCamelCase ( _snake_case = "" ,_snake_case=50 ,_snake_case="*" ):
if not s:
return "\n" + width * char
UpperCAmelCase__ , UpperCAmelCase__ : str = divmod(width - len(_snake_case ) - 2 ,2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
UpperCamelCase__ = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 110 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_lowercase ):
"""simple docstring"""
super().__init__(**_lowercase )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__( self , _lowercase , **_lowercase ):
"""simple docstring"""
return super().__call__(_lowercase , **_lowercase )
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {}
if "candidate_labels" in kwargs:
_lowerCAmelCase = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
_lowerCAmelCase = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _lowercase ( self , _lowercase , _lowercase=None , _lowercase="This is a sound of {}." ):
"""simple docstring"""
if isinstance(_lowercase , _lowercase ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_lowerCAmelCase = requests.get(_lowercase ).content
else:
with open(_lowercase , """rb""" ) as f:
_lowerCAmelCase = f.read()
if isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = ffmpeg_read(_lowercase , self.feature_extractor.sampling_rate )
if not isinstance(_lowercase , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
_lowerCAmelCase = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
_lowerCAmelCase = candidate_labels
_lowerCAmelCase = [hypothesis_template.format(_lowercase ) for x in candidate_labels]
_lowerCAmelCase = self.tokenizer(_lowercase , return_tensors=self.framework , padding=_lowercase )
_lowerCAmelCase = [text_inputs]
return inputs
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = model_inputs.pop("""candidate_labels""" )
_lowerCAmelCase = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , _lowercase ):
_lowerCAmelCase = text_inputs[0]
else:
# Batching case.
_lowerCAmelCase = text_inputs[0][0]
_lowerCAmelCase = self.model(**_lowercase , **_lowercase )
_lowerCAmelCase = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = model_outputs.pop("""candidate_labels""" )
_lowerCAmelCase = model_outputs["""logits"""][0]
if self.framework == "pt":
_lowerCAmelCase = logits.softmax(dim=0 )
_lowerCAmelCase = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
_lowerCAmelCase = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(_lowercase , _lowercase ) , key=lambda x : -x[0] )
]
return result
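# Hedged usage sketch for the pipeline above: the task string follows the
# documented zero-shot audio classification usage, and the CLAP checkpoint is
# an assumption (any audio-text dual encoder with a matching processor works).
def _zero_shot_audio_example(audio_path):
    from transformers import pipeline
    classifier = pipeline(
        task="zero-shot-audio-classification",
        model="laion/clap-htsat-unfused",  # assumed checkpoint
    )
    return classifier(audio_path, candidate_labels=["dog barking", "rain"])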
| 5 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : Any = '''gpt_neo'''
snake_case__ : Dict = ['''past_key_values''']
snake_case__ : str = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , a__=50257 , a__=2048 , a__=2048 , a__=24 , a__=[[["global", "local"], 12]] , a__=16 , a__=None , a__=256 , a__="gelu_new" , a__=0.0 , a__=0.0 , a__=0.0 , a__=0.1 , a__=1e-5 , a__=0.02 , a__=True , a__=50256 , a__=50256 , **a__ , ):
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Dict = hidden_size
__SCREAMING_SNAKE_CASE : Optional[Any] = num_layers
__SCREAMING_SNAKE_CASE : str = num_heads
__SCREAMING_SNAKE_CASE : int = intermediate_size
__SCREAMING_SNAKE_CASE : List[str] = window_size
__SCREAMING_SNAKE_CASE : Dict = activation_function
__SCREAMING_SNAKE_CASE : str = resid_dropout
__SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dropout
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout
__SCREAMING_SNAKE_CASE : Dict = classifier_dropout
__SCREAMING_SNAKE_CASE : str = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Tuple = bos_token_id
__SCREAMING_SNAKE_CASE : Any = eos_token_id
__SCREAMING_SNAKE_CASE : List[str] = attention_types
__SCREAMING_SNAKE_CASE : Any = self.expand_attention_types_params(_lowercase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
f'`config.num_layers = {self.num_layers}`. '
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
@staticmethod
def a_ ( a__ ):
__SCREAMING_SNAKE_CASE : Any = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __A ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
import torch
__SCREAMING_SNAKE_CASE : Dict = input.size()
__SCREAMING_SNAKE_CASE : Tuple = len(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = shape[dimension]
__SCREAMING_SNAKE_CASE : Dict = torch.arange(0 , __lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = torch.div(sizedim - size , __lowerCamelCase , rounding_mode="floor" ) + 1
__SCREAMING_SNAKE_CASE : Optional[int] = torch.arange(__lowerCamelCase ) + low_indices[:min_length][:, None]
__SCREAMING_SNAKE_CASE : Optional[Any] = [slice(__lowerCamelCase )] * rank
__SCREAMING_SNAKE_CASE : str = indices
__SCREAMING_SNAKE_CASE : Dict = input[s]
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__lowerCamelCase )
def __A ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
import torch
__SCREAMING_SNAKE_CASE : List[Any] = torch.arange(1 , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : Dict = torch.remainder(__lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : Dict = remainders == 0
__SCREAMING_SNAKE_CASE : Tuple = candidates[divisor_indices]
__SCREAMING_SNAKE_CASE : Optional[int] = torch.max(__lowerCamelCase )
return largest_divisor, torch.div(__lowerCamelCase , __lowerCamelCase , rounding_mode="floor" )
class __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction="inputs" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
__SCREAMING_SNAKE_CASE : Tuple = {0: "batch", 1: "sequence"}
return common_inputs
@property
def a_ ( self ):
return self._config.num_heads
def a_ ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = super(_lowercase , self ).generate_dummy_inputs(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
# We need to order the input in the way they appears in the forward()
__SCREAMING_SNAKE_CASE : List[Any] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE : Optional[int] = seqlen + 2
__SCREAMING_SNAKE_CASE : Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__SCREAMING_SNAKE_CASE : int = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(self.num_layers )
]
__SCREAMING_SNAKE_CASE : List[Any] = common_inputs["attention_mask"]
if self.use_past:
__SCREAMING_SNAKE_CASE : Any = ordered_inputs["attention_mask"].dtype
__SCREAMING_SNAKE_CASE : Any = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
return ordered_inputs
@property
def a_ ( self ):
return 13
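# Standalone illustration of the attention-type expansion performed by the
# static helper in the config above: [[["global", "local"], 12]] repeats the
# ["global", "local"] pattern 12 times, one entry per layer (24 entries here).
def _expand_attention_types(attention_types):
    attentions = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            attentions.extend(pattern)
    return attentions
# len(_expand_attention_types([[["global", "local"], 12]])) -> 24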
| 211 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_lowercase = logging.get_logger(__name__)
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[str] = ['''input_values''', '''padding_mask''']
def __init__( self , _lowercase = 1 , _lowercase = 24_000 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , **_lowercase , ):
"""simple docstring"""
super().__init__(feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , **_lowercase )
_lowerCAmelCase = chunk_length_s
_lowerCAmelCase = overlap
@property
def _lowercase ( self ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _lowercase ( self ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , _lowercase , _lowercase = None , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
_lowerCAmelCase = True
_lowerCAmelCase = bool(
isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
            _lowerCAmelCase = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(_lowercase , np.ndarray ):
            _lowerCAmelCase = np.asarray(_lowercase , dtype=np.float32 )
        elif isinstance(_lowercase , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            _lowerCAmelCase = raw_audio.astype(np.float32 )
# always return batch
if not is_batched:
_lowerCAmelCase = [np.asarray(_lowercase ).T]
# verify inputs are valid
for idx, example in enumerate(_lowercase ):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
_lowerCAmelCase = None
_lowerCAmelCase = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_lowerCAmelCase = min(array.shape[0] for array in raw_audio )
_lowerCAmelCase = int(np.floor(max_length / self.chunk_stride ) )
_lowerCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_lowerCAmelCase = max(array.shape[0] for array in raw_audio )
_lowerCAmelCase = int(np.ceil(max_length / self.chunk_stride ) )
_lowerCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
_lowerCAmelCase = """max_length"""
else:
_lowerCAmelCase = input_values
# normal padding on batch
if padded_inputs is None:
_lowerCAmelCase = self.pad(
_lowercase , max_length=_lowercase , truncation=_lowercase , padding=_lowercase , return_attention_mask=_lowercase , )
if padding:
_lowerCAmelCase = padded_inputs.pop("""attention_mask""" )
_lowerCAmelCase = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
_lowerCAmelCase = example[..., None]
input_values.append(example.T )
_lowerCAmelCase = input_values
if return_tensors is not None:
_lowerCAmelCase = padded_inputs.convert_to_tensors(_lowercase )
return padded_inputs
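# Standalone check of the chunking arithmetic implemented by the two properties
# above (a sketch with illustrative values, not part of the original file): with
# a 1 s chunk at 24 kHz and 25% overlap, consecutive chunks start 18_000 samples apart.
def _chunk_params(chunk_length_s, sampling_rate, overlap):
    chunk_length = int(chunk_length_s * sampling_rate)
    chunk_stride = max(1, int((1.0 - overlap) * chunk_length))
    return chunk_length, chunk_stride

assert _chunk_params(1.0, 24_000, 0.25) == (24_000, 18_000)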
| 5 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason='''UperNet does not use inputs_embeds''')
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''')
    def test_model_common_attributes(self):
pass
@unittest.skip(reason='''UperNet does not have a base model''')
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason='''UperNet does not have a base model''')
    def test_save_load_fast_init_to_base(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason='''UperNet does not have tied weights''')
    def test_tied_model_weights_key_ignore(self):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
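# Minimal, self-contained sketch of the slice-comparison pattern the two
# integration tests above rely on (synthetic tensors only, no checkpoint
# download; the values are illustrative, not from a model run):
if is_torch_available():
    _expected = torch.tensor([[-7.5958, -7.5958, -7.4302]])
    _actual = _expected + 5e-5  # simulated model output, inside the 1e-4 tolerance
    assert torch.allclose(_actual, _expected, atol=1e-4)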
| 171 |
'''simple docstring'''
_lowercase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowercase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowercase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
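# Quick illustration of how a placeholder map like the one above is consumed
# (a sketch assuming plain string substitution over documentation snippets):
def _apply_doc_patterns(text, patterns):
    for placeholder, replacement in patterns.items():
        text = text.replace(placeholder, replacement)
    return text

assert _apply_doc_patterns("{model_class}.from_pretrained(...)", black_avoid_patterns) == "FakeModelClass.from_pretrained(...)"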
| 5 | 0 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
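# A minimal reference implementation of the accumulate-then-reset contract the
# tests above exercise (a plain-Python sketch, not the transformers
# GradientAccumulator itself; needs no TensorFlow):
class _TinyAccumulator:
    def __init__(self):
        self.step = 0
        self.total = None

    def __call__(self, grad):
        # running sum of the gradients seen so far
        self.total = grad if self.total is None else self.total + grad
        self.step += 1

    def reset(self):
        self.step, self.total = 0, None

_acc = _TinyAccumulator()
for _g in (1.0, -2.0, -1.0):
    _acc(_g)
assert (_acc.step, _acc.total) == (3, -2.0)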
| 104 |
'''simple docstring'''
import functools
def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
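# Worked example (the canonical LeetCode 983 case; the inputs are illustrative,
# not from this file): a 1-day pass on day 1, a 7-day pass covering days 4-10,
# and a 1-day pass on day 20 cost 2 + 7 + 2 = 11.
assert minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11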
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make an incremental progress to the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
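# Quick walk-through of the PhrasalConstraint contract defined above (the token
# ids are illustrative): update() reports (stepped, completed, reset) per token,
# and remaining() hits 0 once the whole phrase has been consumed in order.
_constraint = PhrasalConstraint([5, 9, 2])
for _token in (5, 9, 2):
    _stepped, _completed, _reset = _constraint.update(_token)
assert _completed and _constraint.remaining() == 0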
| 203 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowercase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
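# Example invocation (hedged: the script filename is an assumption; the flags
# are the ones registered above, and the run needs network access to fetch the
# mmseg checkpoint):
#   python convert_upernet_to_pytorch.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny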
| 5 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
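# Shape of the push/load round-trip the tests above exercise, left as a comment
# sketch because it needs hub access (the repo id and token are placeholders):
#   feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=token)
#   reloaded = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
#   assert reloaded.to_dict() == feature_extractor.to_dict()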
| 631 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
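# Cheap sanity check on the helper (illustrative dice, not part of the puzzle):
# two four-sided dice give 4**2 = 16 ordered rolls, and the most likely total,
# 5, occurs 4 times ((1,4), (2,3), (3,2), (4,1)).
_freq = total_frequency_distribution(sides_number=4, dice_number=2)
assert sum(_freq) == 16 and _freq[5] == 4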
if __name__ == "__main__":
print(F"""{solution() = }""")
| 5 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(__lowerCamelCase, 'tmp' )
os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : str = read_json(os.path.join(__lowerCamelCase, 'params.json' ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = NUM_SHARDS[model_size]
SCREAMING_SNAKE_CASE_ : int = params['n_layers']
SCREAMING_SNAKE_CASE_ : Dict = params['n_heads']
SCREAMING_SNAKE_CASE_ : Tuple = n_heads // num_shards
SCREAMING_SNAKE_CASE_ : int = params['dim']
SCREAMING_SNAKE_CASE_ : Dict = dim // n_heads
SCREAMING_SNAKE_CASE_ : List[Any] = 1_0_0_0_0.0
SCREAMING_SNAKE_CASE_ : List[Any] = 1.0 / (base ** (torch.arange(0, __lowerCamelCase, 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
SCREAMING_SNAKE_CASE_ : Optional[Any] = params['n_kv_heads'] # for GQA / MQA
SCREAMING_SNAKE_CASE_ : List[str] = n_heads_per_shard // num_key_value_heads
SCREAMING_SNAKE_CASE_ : Any = dim // num_key_value_heads
else: # compatibility with other checkpoints
SCREAMING_SNAKE_CASE_ : Dict = n_heads
SCREAMING_SNAKE_CASE_ : List[Any] = n_heads_per_shard
SCREAMING_SNAKE_CASE_ : Optional[Any] = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.load(os.path.join(__lowerCamelCase, 'consolidated.00.pth' ), map_location='cpu' )
else:
# Sharded
SCREAMING_SNAKE_CASE_ : List[str] = [
torch.load(os.path.join(__lowerCamelCase, F'''consolidated.{i:02d}.pth''' ), map_location='cpu' )
for i in range(__lowerCamelCase )
]
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = {'weight_map': {}}
for layer_i in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
SCREAMING_SNAKE_CASE_ : Tuple = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
SCREAMING_SNAKE_CASE_ : str = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for i in range(__lowerCamelCase )
], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for i in range(__lowerCamelCase )
], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase ), __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for i in range(__lowerCamelCase )
], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Any = torch.cat(
[loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(__lowerCamelCase )], dim=1 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(__lowerCamelCase )], dim=0 )
SCREAMING_SNAKE_CASE_ : str = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(__lowerCamelCase )], dim=1 )
SCREAMING_SNAKE_CASE_ : int = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(__lowerCamelCase )], dim=0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = inv_freq
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE_ : Tuple = filename
param_count += v.numel()
torch.save(__lowerCamelCase, os.path.join(__lowerCamelCase, __lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ : List[str] = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE_ : Dict = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
SCREAMING_SNAKE_CASE_ : Any = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(__lowerCamelCase )], dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(__lowerCamelCase )], dim=0 ),
}
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE_ : int = filename
param_count += v.numel()
torch.save(__lowerCamelCase, os.path.join(__lowerCamelCase, __lowerCamelCase ) )
# Write configs
SCREAMING_SNAKE_CASE_ : List[str] = {'total_size': param_count * 2}
write_json(__lowerCamelCase, os.path.join(__lowerCamelCase, 'pytorch_model.bin.index.json' ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
SCREAMING_SNAKE_CASE_ : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaConfig(
hidden_size=__lowerCamelCase, intermediate_size=compute_intermediate_size(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), num_attention_heads=params['n_heads'], num_hidden_layers=params['n_layers'], rms_norm_eps=params['norm_eps'], num_key_value_heads=__lowerCamelCase, )
config.save_pretrained(__lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
SCREAMING_SNAKE_CASE_ : int = LlamaForCausalLM.from_pretrained(__lowerCamelCase, torch_dtype=torch.floataa, low_cpu_mem_usage=__lowerCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(__lowerCamelCase, safe_serialization=__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
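# Example invocation (hedged: the script filename and paths are placeholders;
# the flags are the ones registered in main() above):
#   python convert_llama_weights_to_hf.py --input_dir /path/to/llama \
#       --model_size 7B --output_dir ./llama-7b-hf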
| 101 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 5 | 0 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the positive integers that are n-digit nth powers (Project Euler 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
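# Spot check: 8**2 = 64 is a 2-digit square and 9**4 = 6561 a 4-digit fourth
# power, so both are counted; the full run over bases below 10 returns the
# published answer, 49.
assert len(str(8**2)) == 2 and len(str(9**4)) == 4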
if __name__ == "__main__":
print(f"{solution(10, 22) = }")
| 333 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_lowercase = False
try:
_lowercase = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase = None , _lowercase = [] ):
"""simple docstring"""
_lowerCAmelCase = 0
_lowerCAmelCase = choices
_lowerCAmelCase = prompt
if sys.platform == "win32":
_lowerCAmelCase = """*"""
else:
_lowerCAmelCase = """➔ """
def _lowercase ( self , _lowercase , _lowercase = "" ):
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _lowercase )
else:
forceWrite(self.choices[index] , _lowercase )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(_lowercase )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def _lowercase ( self , _lowercase , _lowercase = 1 ):
"""simple docstring"""
_lowerCAmelCase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_lowercase )
move_cursor(_lowercase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["""up"""] )
def _lowercase ( self ):
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP["""down"""] )
def _lowercase ( self ):
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["""newline"""] )
def _lowercase ( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , """DOWN""" )
return self.position
@input.mark(KEYMAP["""interrupt"""] )
def _lowercase ( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , """DOWN""" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = int(chr(self.current_selection ) )
_lowerCAmelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _lowercase )
else:
return
else:
return
def _lowercase ( self , _lowercase = 0 ):
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , """\n""" )
if in_colab:
forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" )
else:
forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" )
_lowerCAmelCase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(_lowercase )
forceWrite("""\n""" )
move_cursor(len(self.choices ) - self.position , """UP""" )
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase = int(builtins.input() )
except ValueError:
_lowerCAmelCase = default_choice
else:
_lowerCAmelCase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , """UP""" )
clear_line()
self.write_choice(_lowercase , """\n""" )
return choice
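# This class renders an interactive selection prompt: arrow or number keys move
# the highlight (falling back to raw numeric input on Colab), and Enter returns
# the index of the highlighted choice.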
| 5 | 0 |
import math
import unittest
def A ( __lowerCamelCase ):
'''simple docstring'''
assert isinstance(__lowerCamelCase , __lowerCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
def snake_case__ ( self):
'''simple docstring'''
with self.assertRaises(_lowercase):
is_prime(-19)
self.assertFalse(
is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.", )
self.assertFalse(
is_prime(1), "One only has 1 positive factor, primes must have exactly two.", )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 500 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
_lowercase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def A (__lowerCamelCase :Optional[int] ):
_lowerCAmelCase = {}
with open(__lowerCamelCase , """r""" ) as file:
for line_number, line in enumerate(__lowerCamelCase ):
_lowerCAmelCase = line.strip()
if line:
_lowerCAmelCase = line.split()
_lowerCAmelCase = line_number
_lowerCAmelCase = words[0]
_lowerCAmelCase = value
return result
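# A reading of the expected file format (one label per line): each non-empty
# line contributes {line_number: first_token}, i.e. the file becomes an
# id -> label mapping for the sequence-classification head.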
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Any , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any] , __lowerCamelCase :List[str] ):
for attribute in key.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = shape_pointer.shape
# let's reduce dimension
_lowerCAmelCase = value[0]
else:
_lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_lowerCAmelCase = value
elif weight_type == "weight_g":
_lowerCAmelCase = value
elif weight_type == "weight_v":
_lowerCAmelCase = value
elif weight_type == "bias":
_lowerCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = value
else:
_lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Dict , __lowerCamelCase :List[Any] , __lowerCamelCase :int ):
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = """.""".join([key, hf_param_name] )
else:
_lowerCAmelCase = key
_lowerCAmelCase = value if """lm_head""" in full_key else value[0]
_lowercase = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
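# These fairseq adapter parameters are stored as raw tensors rather than
# submodules, so they are matched by suffix and copied onto the adapter's
# linear/norm attributes instead of going through MAPPING above.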
def A (__lowerCamelCase :Any , __lowerCamelCase :int , __lowerCamelCase :List[str]=None , __lowerCamelCase :List[Any]=None ):
_lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
_lowerCAmelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase = True
if "*" in mapped_key:
_lowerCAmelCase = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
_lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase = """weight_v"""
elif "bias" in name:
_lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase = """weight"""
else:
_lowerCAmelCase = None
if hf_dict is not None:
rename_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return is_used
return is_used
def A (__lowerCamelCase :Any , __lowerCamelCase :Dict , __lowerCamelCase :Dict ):
_lowerCAmelCase = []
_lowerCAmelCase = fairseq_model.state_dict()
_lowerCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
_lowerCAmelCase = True
else:
_lowerCAmelCase = load_wavaveca_layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def A (__lowerCamelCase :Tuple , __lowerCamelCase :Optional[int] , __lowerCamelCase :Any , __lowerCamelCase :List[Any] , __lowerCamelCase :List[Any] ):
_lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase = name.split(""".""" )
_lowerCAmelCase = int(items[0] )
_lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def A (__lowerCamelCase :List[str] , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any]=None , __lowerCamelCase :Union[str, Any]=None , __lowerCamelCase :str=True , __lowerCamelCase :str=False ):
if config_path is not None:
_lowerCAmelCase = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaConfig()
if is_seq_class:
_lowerCAmelCase = read_txt_into_dict(__lowerCamelCase )
_lowerCAmelCase = idalabel
_lowerCAmelCase = WavaVecaForSequenceClassification(__lowerCamelCase )
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
_lowerCAmelCase = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase = target_dict.pad_index
_lowerCAmelCase = target_dict.bos_index
_lowerCAmelCase = target_dict.eos_index
_lowerCAmelCase = len(target_dict.symbols )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase = 0
_lowerCAmelCase = 1
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
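            # Checkpoints with layer-norm feature extractors were trained on normalized,
            # attention-masked audio, so both do_normalize and return_attention_mask
            # follow this flag.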
_lowerCAmelCase = True if config.feat_extract_norm == """layer""" else False
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
_lowerCAmelCase = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
_lowerCAmelCase = WavaVecaForCTC(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase = argparse.Namespace(task="""audio_pretraining""" )
_lowerCAmelCase = fairseq.tasks.setup_task(__lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
_lowerCAmelCase = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_lowercase = parser.parse_args()
_lowercase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = BarthezTokenizer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BarthezTokenizerFast
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Optional[int] = True
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
_snake_case : Tuple = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_lowercase )
_snake_case : str = tokenizer
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case : List[str] = '<pad>'
_snake_case : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_lowercase ) , 101122 )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case : List[str] = [0, 57, 3018, 70307, 91, 2]
_snake_case : str = self.tokenizer(
_lowercase , max_length=len(_lowercase ) , padding=_lowercase , truncation=_lowercase , return_tensors='pt' )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_snake_case : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_lowercase , _lowercase )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Optional[int] = self.get_rust_tokenizer()
_snake_case : Union[str, Any] = 'I was born in 92000, and this is falsé.'
_snake_case : Any = tokenizer.tokenize(_lowercase )
_snake_case : Dict = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
_snake_case : Optional[Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
_snake_case : Any = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
_snake_case : Tuple = self.get_rust_tokenizer()
_snake_case : str = tokenizer.encode(_lowercase )
_snake_case : Union[str, Any] = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_snake_case : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_lowercase , )
| 517 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[str] = '''decision_transformer'''
_lowercase : Optional[Any] = ['''past_key_values''']
_lowercase : str = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowercase=17 , _lowercase=4 , _lowercase=128 , _lowercase=4_096 , _lowercase=True , _lowercase=1 , _lowercase=1_024 , _lowercase=3 , _lowercase=1 , _lowercase=None , _lowercase="relu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1e-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=50_256 , _lowercase=50_256 , _lowercase=False , _lowercase=False , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = state_dim
_lowerCAmelCase = act_dim
_lowerCAmelCase = hidden_size
_lowerCAmelCase = max_ep_len
_lowerCAmelCase = action_tanh
_lowerCAmelCase = vocab_size
_lowerCAmelCase = n_positions
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = n_inner
_lowerCAmelCase = activation_function
_lowerCAmelCase = resid_pdrop
_lowerCAmelCase = embd_pdrop
_lowerCAmelCase = attn_pdrop
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scale_attn_weights
_lowerCAmelCase = use_cache
_lowerCAmelCase = scale_attn_by_inverse_layer_idx
_lowerCAmelCase = reorder_and_upcast_attn
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
| 5 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _a (_SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Dict = KandinskyVaaControlnetImgaImgPipeline
UpperCAmelCase__: int = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
UpperCAmelCase__: Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
UpperCAmelCase__: Dict = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__: str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
torch.manual_seed(0 )
A__ : Union[str, Any] = {
"""in_channels""": 8,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A__ : List[Any] = UNetaDConditionModel(**_lowercase )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
A__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
A__ : int = self.dummy_unet
A__ : Dict = self.dummy_movq
A__ : List[str] = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
A__ : str = DDIMScheduler(**_lowercase )
A__ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , A__ , A__=0 ):
A__ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase ) ).to(_lowercase )
A__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowercase )
# create init_image
A__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
A__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ : Optional[int] = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((256, 256) )
# create hint
A__ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
if str(_lowercase ).startswith("""mps""" ):
A__ : str = torch.manual_seed(_lowercase )
else:
A__ : Dict = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A__ : List[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
A__ : Tuple = """cpu"""
A__ : int = self.get_dummy_components()
A__ : Tuple = self.pipeline_class(**_lowercase )
A__ : str = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A__ : Tuple = pipe(**self.get_dummy_inputs(_lowercase ) )
A__ : Union[str, Any] = output.images
A__ : List[Any] = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
A__ : Optional[Any] = image[0, -3:, -3:, -1]
A__ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : List[str] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
A__ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A__ : Union[str, Any] = init_image.resize((512, 512) )
A__ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A__ : Any = torch.from_numpy(np.array(_lowercase ) ).float() / 2_5_5.0
A__ : List[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
A__ : List[Any] = """A robot, 4k photo"""
A__ : Tuple = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_lowercase )
A__ : Optional[int] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
A__ : Dict = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ , A__ : Tuple = pipe_prior(
_lowercase , image=_lowercase , strength=0.8_5 , generator=_lowercase , negative_prompt="""""" , ).to_tuple()
A__ : List[str] = pipeline(
image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , hint=_lowercase , generator=_lowercase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , )
A__ : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 456 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
_lowercase = None
_lowercase = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
_lowercase = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def A (n :int , ffn_dim_multiplier :Optional[Any]=1 , multiple_of :List[Any]=256 ):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
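# Rounds ffn_dim_multiplier * int(8 * n / 3) up to the nearest multiple of
# multiple_of. Sanity check with the defaults: n=4096 gives
# ((10922 + 255) // 256) * 256 = 11008, matching the 7B entry above.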
def A (__lowerCamelCase :Any ):
with open(__lowerCamelCase , """r""" ) as f:
return json.load(__lowerCamelCase )
def A (__lowerCamelCase :List[Any] , __lowerCamelCase :int ):
with open(__lowerCamelCase , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Optional[Any] , __lowerCamelCase :Tuple=True ):
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """tmp""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = read_json(os.path.join(__lowerCamelCase , """params.json""" ) )
_lowerCAmelCase = NUM_SHARDS[model_size]
_lowerCAmelCase = params["""n_layers"""]
_lowerCAmelCase = params["""n_heads"""]
_lowerCAmelCase = n_heads // num_shards
_lowerCAmelCase = params["""dim"""]
_lowerCAmelCase = dim // n_heads
_lowerCAmelCase = 10_000.0
_lowerCAmelCase = 1.0 / (base ** (torch.arange(0 , __lowerCamelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase = params["""n_kv_heads"""] # for GQA / MQA
_lowerCAmelCase = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase = n_heads
_lowerCAmelCase = n_heads_per_shard
_lowerCAmelCase = dim
# permute for sliced rotary
    def permute(w :Optional[int] , n_heads :str=n_heads , dima :str=dim , dimb :List[Any]=dim ):
        return w.view(n_heads , dima // n_heads // 2 , 2 , dimb ).transpose(1 , 2 ).reshape(dima , dimb )
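    # Meta's checkpoints store query/key projection rows in interleaved rotary
    # order, while the HF implementation rotates the first and second half of each
    # head; viewing as (n_heads, dima // n_heads // 2, 2, dimb) and swapping the
    # middle axes reorders the rows to match.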
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase = torch.load(os.path.join(__lowerCamelCase , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
_lowerCAmelCase = [
torch.load(os.path.join(__lowerCamelCase , f'consolidated.{i:02d}.pth' ) , map_location="""cpu""" )
for i in range(__lowerCamelCase )
]
_lowerCAmelCase = 0
_lowerCAmelCase = {"""weight_map""": {}}
for layer_i in range(__lowerCamelCase ):
_lowerCAmelCase = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object; saving attention_norm and ffn_norm would save other weights too, which is
            # redundant as those weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) )
_lowerCAmelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
_lowerCAmelCase = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(__lowerCamelCase )] , dim=1 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__lowerCamelCase )] , dim=0 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__lowerCamelCase )] , dim=1 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__lowerCamelCase )] , dim=0 )
_lowerCAmelCase = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
_lowerCAmelCase = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
_lowerCAmelCase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__lowerCamelCase )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__lowerCamelCase )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
# Write configs
_lowerCAmelCase = {"""total_size""": param_count * 2}
write_json(__lowerCamelCase , os.path.join(__lowerCamelCase , """pytorch_model.bin.index.json""" ) )
_lowerCAmelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_lowerCAmelCase = params["""multiple_of"""] if """multiple_of""" in params else 256
_lowerCAmelCase = LlamaConfig(
hidden_size=__lowerCamelCase , intermediate_size=compute_intermediate_size(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=__lowerCamelCase , )
config.save_pretrained(__lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
_lowerCAmelCase = LlamaForCausalLM.from_pretrained(__lowerCamelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=__lowerCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__lowerCamelCase , safe_serialization=__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Union[str, Any] ):
# Initialize the tokenizer based on the `spm` model
_lowerCAmelCase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase = tokenizer_class(__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
def A ():
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=__lowerCamelCase , help="""Whether or not to save using `safetensors`.""" )
_lowerCAmelCase = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , __lowerCamelCase )
if __name__ == "__main__":
main()
| 5 | 0 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class _snake_case ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase__ ='''align_text_model'''
def __init__( self : List[Any] , snake_case : Optional[Any]=30_522 , snake_case : Any=768 , snake_case : Optional[int]=12 , snake_case : Dict=12 , snake_case : int=3_072 , snake_case : List[str]="gelu" , snake_case : Optional[Any]=0.1 , snake_case : Any=0.1 , snake_case : Optional[Any]=512 , snake_case : Tuple=2 , snake_case : List[str]=0.02 , snake_case : List[str]=1e-12 , snake_case : Any=0 , snake_case : List[Any]="absolute" , snake_case : Union[str, Any]=True , **snake_case : Optional[Any] , ):
super().__init__(**_lowercase )
UpperCAmelCase_ :str = vocab_size
UpperCAmelCase_ :Tuple = hidden_size
UpperCAmelCase_ :Optional[Any] = num_hidden_layers
UpperCAmelCase_ :str = num_attention_heads
UpperCAmelCase_ :Dict = hidden_act
UpperCAmelCase_ :List[str] = intermediate_size
UpperCAmelCase_ :str = hidden_dropout_prob
UpperCAmelCase_ :Any = attention_probs_dropout_prob
UpperCAmelCase_ :Union[str, Any] = max_position_embeddings
UpperCAmelCase_ :List[Any] = type_vocab_size
UpperCAmelCase_ :List[str] = initializer_range
UpperCAmelCase_ :Optional[int] = layer_norm_eps
UpperCAmelCase_ :Dict = position_embedding_type
UpperCAmelCase_ :Dict = use_cache
UpperCAmelCase_ :Dict = pad_token_id
@classmethod
def snake_case_ ( cls : str , snake_case : List[str] , **snake_case : Any ):
cls._set_token_in_kwargs(_lowercase )
UpperCAmelCase_ ,UpperCAmelCase_ :str = cls.get_config_dict(_lowercase , **_lowercase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
UpperCAmelCase_ :Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
class _snake_case ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase__ ='''align_vision_model'''
def __init__( self : Any , snake_case : Optional[int] = 3 , snake_case : Dict = 600 , snake_case : List[str] = 2.0 , snake_case : Dict = 3.1 , snake_case : Dict = 8 , snake_case : Dict = [3, 3, 5, 3, 5, 5, 3] , snake_case : Optional[Any] = [32, 16, 24, 40, 80, 112, 192] , snake_case : Tuple = [16, 24, 40, 80, 112, 192, 320] , snake_case : Union[str, Any] = [] , snake_case : Any = [1, 2, 2, 2, 1, 2, 1] , snake_case : Optional[int] = [1, 2, 2, 3, 3, 4, 1] , snake_case : Optional[int] = [1, 6, 6, 6, 6, 6, 6] , snake_case : Union[str, Any] = 0.25 , snake_case : str = "swish" , snake_case : List[Any] = 2_560 , snake_case : Union[str, Any] = "mean" , snake_case : Optional[int] = 0.02 , snake_case : str = 0.001 , snake_case : Optional[int] = 0.99 , snake_case : Optional[Any] = 0.2 , **snake_case : Dict , ):
super().__init__(**_lowercase )
UpperCAmelCase_ :Optional[int] = num_channels
UpperCAmelCase_ :Tuple = image_size
UpperCAmelCase_ :List[Any] = width_coefficient
UpperCAmelCase_ :int = depth_coefficient
UpperCAmelCase_ :Optional[int] = depth_divisor
UpperCAmelCase_ :Optional[Any] = kernel_sizes
UpperCAmelCase_ :Optional[int] = in_channels
UpperCAmelCase_ :Dict = out_channels
UpperCAmelCase_ :Optional[Any] = depthwise_padding
UpperCAmelCase_ :Optional[int] = strides
UpperCAmelCase_ :Optional[Any] = num_block_repeats
UpperCAmelCase_ :List[str] = expand_ratios
UpperCAmelCase_ :List[str] = squeeze_expansion_ratio
UpperCAmelCase_ :Optional[Any] = hidden_act
UpperCAmelCase_ :str = hidden_dim
UpperCAmelCase_ :Union[str, Any] = pooling_type
UpperCAmelCase_ :List[Any] = initializer_range
UpperCAmelCase_ :str = batch_norm_eps
UpperCAmelCase_ :str = batch_norm_momentum
UpperCAmelCase_ :Any = drop_connect_rate
UpperCAmelCase_ :List[str] = sum(_lowercase ) * 4
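        # Each repeated block is counted as four hidden layers (expansion, depthwise,
        # squeeze-and-excitation, and projection stages), hence the factor of 4.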
@classmethod
def snake_case_ ( cls : Optional[Any] , snake_case : str , **snake_case : Optional[int] ):
cls._set_token_in_kwargs(_lowercase )
UpperCAmelCase_ ,UpperCAmelCase_ :Optional[Any] = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
UpperCAmelCase_ :Tuple = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
class _snake_case ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase__ ='''align'''
UpperCamelCase__ =True
def __init__( self : List[Any] , snake_case : str=None , snake_case : Any=None , snake_case : Union[str, Any]=640 , snake_case : Any=1.0 , snake_case : List[str]=0.02 , **snake_case : Any , ):
super().__init__(**_lowercase )
if text_config is None:
UpperCAmelCase_ :List[str] = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
UpperCAmelCase_ :str = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
UpperCAmelCase_ :Optional[int] = AlignTextConfig(**_lowercase )
UpperCAmelCase_ :Optional[Any] = AlignVisionConfig(**_lowercase )
UpperCAmelCase_ :Tuple = projection_dim
UpperCAmelCase_ :str = temperature_init_value
UpperCAmelCase_ :List[str] = initializer_range
@classmethod
def snake_case_ ( cls : Dict , snake_case : Optional[Any] , snake_case : Dict , **snake_case : Union[str, Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowercase )
def snake_case_ ( self : List[Any] ):
UpperCAmelCase_ :Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ :Optional[int] = self.text_config.to_dict()
UpperCAmelCase_ :List[str] = self.vision_config.to_dict()
UpperCAmelCase_ :Optional[int] = self.__class__.model_type
return output
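# to_dict() deep-copies the instance and inlines both sub-configs as plain
# dicts (plus the composite model_type), which is what lets this config
# serialize to JSON and round-trip through from_dict().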
| 608 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = (DDPMScheduler,)
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowercase )
return config
def _lowercase ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def _lowercase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowercase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowercase ):
if i == len(_lowercase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowercase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowercase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowercase )
with self.assertRaises(_lowercase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            _lowercase , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_lowercase )
| 5 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def __A ( ):
"""simple docstring"""
from torch.utils.cpp_extension import load
__SCREAMING_SNAKE_CASE : Dict = Path(__lowerCamelCase ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , __lowerCamelCase , with_cuda=__lowerCamelCase , extra_include_paths=[str(__lowerCamelCase )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
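# torch's cpp_extension.load() JIT-compiles the listed C++/CUDA sources on the
# first call and caches the build, so later calls are cheap; the returned
# extension exposes the fused multi-scale deformable attention kernels.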
| 211 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_lowercase = logging.get_logger(__name__)
_lowercase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    '''simple docstring'''
    model_type : str = field(
        default=None , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(MODEL_TYPES )} )
    data_dir : str = field(
        default=None , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
    max_seq_length : int = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    doc_stride : int = field(
        default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
    max_query_length : int = field(
        default=6_4 , metadata={
            '''help''': (
                '''The maximum number of tokens for the question. Questions longer than this will '''
                '''be truncated to this length.'''
            )
        } , )
    max_answer_length : int = field(
        default=3_0 , metadata={
            '''help''': (
                '''The maximum length of an answer that can be generated. This is needed because the start '''
                '''and end predictions are not conditioned on one another.'''
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    version_2_with_negative : bool = field(
        default=False , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
    null_score_diff_threshold : float = field(
        default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    n_best_size : int = field(
        default=2_0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    lang_id : int = field(
        default=0 , metadata={
            '''help''': (
                '''language id of input for language-specific xlm models (see'''
                ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
            )
        } , )
    threads : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class Split( Enum ):
    '''simple docstring'''
    train = '''train'''
    dev = '''dev'''
class SquadDataset( Dataset ):
    '''simple docstring'''
    args : SquadDataTrainingArguments
    features : List[SquadFeatures]
    mode : Split
    is_language_sensitive : bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ):
        """simple docstring"""
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = """v2""" if args.version_2_with_negative else """v1"""
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["""features"""]
                self.dataset = self.old_features.get("""dataset""" , None )
                self.examples = self.old_features.get("""examples""" , None )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        """ future run""" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
    def __getitem__( self , i ):
        """simple docstring"""
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": attention_mask,
            """token_type_ids""": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({"""is_impossible""": is_impossible} )
        if self.is_language_sensitive:
            inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
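# Usage sketch (a SQuAD-style data_dir and a pretrained tokenizer are assumed to
# exist; both values below are placeholders):
#
#   args = SquadDataTrainingArguments(data_dir="path/to/squad")
#   dataset = SquadDataset(args, tokenizer, mode=Split.dev)
#   batch = dataset[0]  # dict of tensors ready for a question-answering forward pass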
| 5 | 0 |
from maths.prime_check import is_prime
def UpperCamelCase( number : int ):
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
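# Example (sketch): for a prime input whose value plus two is also prime, the twin
# is returned; otherwise -1.
#   UpperCamelCase(5)  # -> 7   (5 and 7 are twin primes)
#   UpperCamelCase(4)  # -> -1  (4 is not prime)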
if __name__ == "__main__":
import doctest
doctest.testmod()
| 171 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type : str = '''dpr'''
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim = 0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
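# Usage sketch (defaults as defined above; the projection_dim value is illustrative):
#
#   config = DPRConfig(projection_dim=128)
#   config.hidden_size  # 768 by default, matching a BERT-base encoder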
| 5 | 0 |
"""simple docstring"""
def solution( numerator : int = 3, denominator : int = 7, limit : int = 1000000 ) -> int:
    """simple docstring"""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
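# Note: with the default arguments this finds the reduced proper fraction
# immediately to the left of 3/7 for denominators up to 10**6 (Project Euler 71);
# the returned numerator is 428570, i.e. the fraction 428570/999997.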
| 104 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_lowercase = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_lowercase = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_lowercase = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types( self ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """simple docstring"""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse}
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speecht5"""] = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 203 |
'''simple docstring'''
def triangle_number_generator ():
    for n in range(1 , 1000000 ):
        yield n * (n + 1) // 2
def count_divisors (n :int ):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution ():
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
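# Note: count_divisors relies on the identity d(p1**a1 * ... * pk**ak)
# = (a1 + 1) * ... * (ak + 1); e.g. count_divisors(28) == 6 for 1, 2, 4, 7, 14, 28.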
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase = logging.get_logger(__name__)
class DonutFeatureExtractor( DonutImageProcessor ):
'''simple docstring'''
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" , FutureWarning , )
super().__init__(*_lowercase , **_lowercase )
| 5 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , embedding_size=1_6 , hidden_size=3_6 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""sentence_order_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=3_7 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest (unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head_absolute_embedding( self ):
        """simple docstring"""
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 101 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 5 | 0 |
from __future__ import annotations
from math import gcd
def pollard_rho ( num : int , seed : int = 2 , step : int = 1 , attempts : int = 3 , ) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value , step , modulus ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
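# Example (sketch): pollard_rho(8051) returns a nontrivial factor of 8051 = 83 * 97
# (which of the two depends on seed/step), while pollard_rho(17) returns None
# because 17 is prime and every attempt ends in the divisor == num branch.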
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"{args.num} is probably prime")
else:
        quotient = args.num // divisor
print(f"{args.num} = {divisor} * {quotient}")
| 333 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
_lowercase = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
_lowercase = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
    soup = BeautifulSoup(res.text, """html.parser""")
    links = list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 5 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler( SchedulerMixin , ConfigMixin):
    order = 2
@register_to_config
    def __init__( self, sigma_min = 0.02, sigma_max = 100, s_noise = 1.007, s_churn = 80, s_min = 0.05, s_max = 50, ):
        '''simple docstring'''
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)
    def scale_model_input( self, sample, timestep = None):
        '''simple docstring'''
        return sample
    def set_timesteps( self, num_inference_steps, device = None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input( self, sample, sigma, generator = None):
        '''simple docstring'''
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step( self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict = True, ):
        '''simple docstring'''
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def step_correct( self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict = True, ):
        '''simple docstring'''
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def add_noise( self, original_samples, noise, timesteps):
'''simple docstring'''
raise NotImplementedError()
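# Sampling-loop sketch (roughly how a pipeline drives this stochastic sampler from
# Karras et al. (2022); `model`, `sample` and `generator` are assumed to exist, and
# the optional second-order correction via step_correct is omitted for brevity):
#
#   scheduler.set_timesteps(num_inference_steps)
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
#       model_output = model(sample_hat, sigma_hat).sample
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample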
| 500 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main ():
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/diffusers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model ( self : Dict ):
"""simple docstring"""
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !"
        output = model(input_ids )['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 517 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 5 | 0 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class _a (TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = """[PAD]"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """[PAD]""" )
self.assertEqual(vocab_keys[1] , """[CLS]""" )
self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(vocab_keys ) , 1012 )
    def test_vocab_size( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
    def test_full_tokenizer( self ):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
    def big_tokenizer( self ):
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
    def test_tokenization_base_easy_symbols( self ):
A__ : List[str] = """Hello World!"""
A__ : Union[str, Any] = [3_5389, 6672, 49, 2]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
    def test_tokenizer_integration( self ):
A__ : Dict = {"""input_ids""": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A__ , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 456 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_mae"""] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit_mae"""] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 0 |
"""simple docstring"""
def triangle_number_generator ( ):
    '''simple docstring'''
    for n in range(1, 1000000 ):
        yield n * (n + 1) // 2
def count_divisors ( n : int ):
    '''simple docstring'''
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution ( ):
    '''simple docstring'''
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
| 608 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline( Pipeline ):
'''simple docstring'''
def __init__( self , **_lowercase ):
"""simple docstring"""
super().__init__(**_lowercase )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
    def __call__( self , audios , **kwargs ):
        """simple docstring"""
        return super().__call__(audios , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}
    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        """simple docstring"""
        if isinstance(audio , str ):
            if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , """rb""" ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError("""We expect a numpy ndarray as input""" )
        if len(audio.shape ) != 1:
            raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
        inputs["""candidate_labels"""] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["""text_inputs"""] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        candidate_labels = model_inputs.pop("""candidate_labels""" )
        text_inputs = model_inputs.pop("""text_inputs""" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        """simple docstring"""
        candidate_labels = model_outputs.pop("""candidate_labels""" )
        logits = model_outputs["""logits"""][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError("""`tf` framework not supported.""" )
        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
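# Usage sketch (assumes a CLAP-style audio-text checkpoint; the model id is
# illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("audio.wav", candidate_labels=["dog barking", "vacuum cleaner"])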
| 5 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args ( unknown_args ):
"""simple docstring"""
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main ( ):
    """simple docstring"""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers" )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
| 211 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names : List[str] = ['''input_values''', '''padding_mask''']
    def __init__( self , feature_size = 1 , sampling_rate = 24_000 , padding_value = 0.0 , chunk_length_s = None , overlap = None , **kwargs , ):
        """simple docstring"""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
    def chunk_length( self ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self , raw_audio , padding = None , truncation = False , max_length = None , return_tensors = None , sampling_rate = None , ):
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
                    F' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        if padding and truncation:
            raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio , np.ndarray ):
            raw_audio = np.asarray(raw_audio , dtype=np.float32 )
        elif isinstance(raw_audio , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
        padded_inputs = None
        input_values = BatchFeature({"""input_values""": raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = """max_length"""
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values , max_length=max_length , truncation=truncation , padding=padding , return_attention_mask=padding , )
            if padding:
                padded_inputs["""padding_mask"""] = padded_inputs.pop("""attention_mask""" )
        input_values = []
        for example in padded_inputs.pop("""input_values""" ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs["""input_values"""] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
| 5 | 0 |
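To make the chunking arithmetic in the feature extractor above concrete, here is a small numeric sketch, assuming a 1 s chunk at 24 kHz with 50% overlap (the values are illustrative, not taken from a real checkpoint):

import math

sampling_rate = 24_000
chunk_length_s = 1.0
overlap = 0.5

chunk_length = int(chunk_length_s * sampling_rate)  # 24_000 samples per chunk
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 12_000 samples between chunk starts

# Padding path: round the longest example up to a whole number of strides plus one chunk.
longest = 30_000
nb_step = int(math.ceil(longest / chunk_stride))  # 3
padded_length = (nb_step - 1) * chunk_stride + chunk_length  # 48_000
assert padded_length >= longest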
A__ : List[Any] = '''
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment out the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__ : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__ : List[str] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 171 |
'''simple docstring'''
_lowercase = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment out the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowercase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowercase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 5 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( UpperCAmelCase_ : list[int] ) -> bool:
    """simple docstring"""
    return len(set(UpperCAmelCase_ ) ) == len(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
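A short usage sketch for the set-based duplicate check above (O(n) time and O(n) extra space):

assert _lowerCamelCase([1, 2, 3, 4])  # all elements unique
assert not _lowerCamelCase([1, 2, 2, 4])  # 2 appears twice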
'''simple docstring'''
import functools
def A (days :list[int] , costs :list[int] ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("""The parameter days should be a list of integers""" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("""The parameter costs should be a list of three integers""" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("""All days elements should be greater than 0""" )
    if max(days ) >= 366:
        raise ValueError("""All days elements should be less than 366""" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index :int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
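A usage sketch for the travel-pass DP above, using the classic instance in which the optimum mixes 1-day and 7-day passes:

# days to travel and [1-day, 7-day, 30-day] pass prices; optimum is 2 + 7 + 2 = 11.
assert A([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11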
def _lowerCamelCase ( a , b ):
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:] # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 203 |
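A usage sketch for the binary XOR helper above, cross-checked against Python's built-in operator:

assert _lowerCamelCase(25, 32) == "0b111001"  # 25 ^ 32 == 57
assert _lowerCamelCase(25, 32) == bin(25 ^ 32)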
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def A (__lowerCamelCase :List[Any] ):
_lowerCAmelCase = 384
if "tiny" in model_name:
_lowerCAmelCase = [3, 3, 9, 3]
_lowerCAmelCase = [96, 192, 384, 768]
if "small" in model_name:
_lowerCAmelCase = [3, 3, 27, 3]
_lowerCAmelCase = [96, 192, 384, 768]
if "base" in model_name:
_lowerCAmelCase = [3, 3, 27, 3]
_lowerCAmelCase = [128, 256, 512, 1024]
_lowerCAmelCase = 512
if "large" in model_name:
_lowerCAmelCase = [3, 3, 27, 3]
_lowerCAmelCase = [192, 384, 768, 1536]
_lowerCAmelCase = 768
if "xlarge" in model_name:
_lowerCAmelCase = [3, 3, 27, 3]
_lowerCAmelCase = [256, 512, 1024, 2048]
_lowerCAmelCase = 1024
# set label information
_lowerCAmelCase = 150
_lowerCAmelCase = """huggingface/label-files"""
_lowerCAmelCase = """ade20k-id2label.json"""
_lowerCAmelCase = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
    _lowerCAmelCase = {int(k ): v for k, v in idalabel.items()}
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
_lowerCAmelCase = ConvNextConfig(
depths=__lowerCamelCase , hidden_sizes=__lowerCamelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
_lowerCAmelCase = UperNetConfig(
backbone_config=__lowerCamelCase , auxiliary_in_channels=__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , )
return config
def A (__lowerCamelCase :Optional[Any] ):
_lowerCAmelCase = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Dict , __lowerCamelCase :Tuple ):
_lowerCAmelCase = dct.pop(__lowerCamelCase )
_lowerCAmelCase = val
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Optional[Any] , __lowerCamelCase :Any ):
_lowerCAmelCase = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
_lowerCAmelCase = model_name_to_url[model_name]
_lowerCAmelCase = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )["""state_dict"""]
_lowerCAmelCase = get_upernet_config(__lowerCamelCase )
_lowerCAmelCase = UperNetForSemanticSegmentation(__lowerCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCAmelCase = state_dict.pop(__lowerCamelCase )
if "bn" in key:
_lowerCAmelCase = key.replace("""bn""" , """batch_norm""" )
_lowerCAmelCase = val
# rename keys
_lowerCAmelCase = create_rename_keys(__lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
# verify on image
_lowerCAmelCase = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
_lowerCAmelCase = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("""RGB""" )
_lowerCAmelCase = SegformerImageProcessor()
_lowerCAmelCase = processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
_lowerCAmelCase = model(__lowerCamelCase )
if model_name == "upernet-convnext-tiny":
_lowerCAmelCase = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
_lowerCAmelCase = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
_lowerCAmelCase = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
_lowerCAmelCase = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
_lowerCAmelCase = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowercase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 5 | 0 |
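A toy illustration of the pop-and-reassign key renaming that rename_key performs in the conversion script above (the key names here are hypothetical):

state = {"backbone.norm0.weight": [1.0]}
val = state.pop("backbone.norm0.weight")
state["backbone.hidden_states_norms.stage1.weight"] = val
assert "backbone.norm0.weight" not in state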
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution (sides_number :int , dice_number :int ):
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(faces_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution ():
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 5 | 0 |
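The enumeration above visits every face combination (sides**dice tuples); a lighter-footprint sketch builds the same totals distribution by convolving one die at a time, in O(dice * totals * sides):

def convolved_distribution(sides_number: int, dice_number: int) -> list[int]:
    dist = [1]  # one way to reach total 0 with zero dice
    for _ in range(dice_number):
        new_dist = [0] * (len(dist) + sides_number)
        for total, freq in enumerate(dist):
            for face in range(1, sides_number + 1):
                new_dist[total + face] += freq
        dist = new_dist
    return dist

assert sum(convolved_distribution(4, 9)) == 4**9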
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 1
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : Optional[int] = (3_2, 3_2)
SCREAMING_SNAKE_CASE_ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase )
return image
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
def extract(*lowerCAmelCase__ , **lowerCAmelCase__ ):
class __lowercase :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch.ones([0] )
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
self.pixel_values.to(_lowercase )
return self
return Out()
return extract
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ : Dict = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_vae
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ : int = StableDiffusionPipeline(
unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ : int = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE_ : Any = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ : int = torch.Generator(device=_lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd_pipe([prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
SCREAMING_SNAKE_CASE_ : Any = output.images
SCREAMING_SNAKE_CASE_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=_lowercase , )[0]
SCREAMING_SNAKE_CASE_ : List[str] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
SCREAMING_SNAKE_CASE_ : Dict = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : int = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ : Tuple = PNDMScheduler(skip_prk_steps=_lowercase )
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_vae
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ : Any = StableDiffusionPipeline(
unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ : List[str] = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE_ : int = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Generator(device=_lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd_pipe([prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
SCREAMING_SNAKE_CASE_ : List[Any] = output.images
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Generator(device=_lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=_lowercase , )[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
SCREAMING_SNAKE_CASE_ : Dict = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(pipe.scheduler , _lowercase )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE_ : List[Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE_ : Tuple = StableDiffusionPipeline.from_pretrained(_lowercase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE_ : List[Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ : str = PNDMScheduler(skip_prk_steps=_lowercase )
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_vae
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
SCREAMING_SNAKE_CASE_ : Optional[Any] = unet.half()
SCREAMING_SNAKE_CASE_ : str = vae.half()
SCREAMING_SNAKE_CASE_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionPipeline(
unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ : List[Any] = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE_ : int = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ : Any = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=_lowercase )
SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[Any] = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE_ : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
SCREAMING_SNAKE_CASE_ : str = 4_0_0_3_6_6_0_3_4_6
SCREAMING_SNAKE_CASE_ : str = 7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(_lowercase )
SCREAMING_SNAKE_CASE_ : int = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE_ : Dict = output.images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : int = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(_lowercase )
SCREAMING_SNAKE_CASE_ : List[str] = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE_ : Dict = output.images
SCREAMING_SNAKE_CASE_ : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=_lowercase )
SCREAMING_SNAKE_CASE_ : int = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : int = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE_ : Dict = 'padme amidala taking a bath artwork, safe for work, no nudity'
SCREAMING_SNAKE_CASE_ : Tuple = 2_7_3_4_9_7_1_7_5_5
SCREAMING_SNAKE_CASE_ : int = 7
SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(_lowercase )
SCREAMING_SNAKE_CASE_ : List[str] = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE_ : str = output.images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Optional[int] = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(_lowercase )
SCREAMING_SNAKE_CASE_ : Tuple = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : List[str] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Optional[int] = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1_0_4_4_3_5_5_2_3_4
SCREAMING_SNAKE_CASE_ : List[str] = 1_2
SCREAMING_SNAKE_CASE_ : Dict = torch.manual_seed(_lowercase )
SCREAMING_SNAKE_CASE_ : Any = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE_ : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
SCREAMING_SNAKE_CASE_ : str = torch.manual_seed(_lowercase )
SCREAMING_SNAKE_CASE_ : List[Any] = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE_ : Any = output.images
SCREAMING_SNAKE_CASE_ : List[str] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 101 |
'''simple docstring'''
from manim import *
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""CPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(1 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""GPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.align_to(_lowercase , _lowercase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""Model""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) , )
_lowerCAmelCase = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
_lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=2.5 ) , Write(_lowercase ) , Write(_lowercase ) )
self.add(_lowercase )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, rect in enumerate(_lowercase ):
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
cpu_target.move_to(_lowercase )
cpu_target.generate_target()
_lowerCAmelCase = 0.46 / 4
_lowerCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_lowercase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowercase , buff=0.0 )
cpu_targs.append(_lowercase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowercase ) )
second_animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
self.play(*_lowercase )
self.play(*_lowercase )
self.wait()
| 5 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ="The Nymphenburg Palace is a beautiful palace in Munich!"
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 10_24,
"hidden_size": 7_68,
"max_length": 5_12,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 10_24,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
UpperCAmelCase = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
UpperCAmelCase = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=__lowerCamelCase , output_all_encodings=__lowerCamelCase , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , __lowerCamelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
UpperCAmelCase = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
UpperCAmelCase = os.path.join(get_home_dir() , "models" )
UpperCAmelCase = _load_vocab(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , cls=__lowerCamelCase )
UpperCAmelCase = nlp.model.BERTModel(
__lowerCamelCase , len(__lowerCamelCase ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=__lowerCamelCase , use_token_type_embed=__lowerCamelCase , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=__lowerCamelCase , use_decoder=__lowerCamelCase , )
original_bort.load_parameters(__lowerCamelCase , cast_dtype=__lowerCamelCase , ignore_extra=__lowerCamelCase )
UpperCAmelCase = original_bort._collect_params_with_prefix()
# Build our config 🤗
UpperCAmelCase = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(__lowerCamelCase ),
}
UpperCAmelCase = BertConfig.from_dict(__lowerCamelCase )
UpperCAmelCase = BertForMaskedLM(__lowerCamelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(_lowerCAmelCase ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase = hf_param.shape
UpperCAmelCase = to_torch(params[gluon_param] )
UpperCAmelCase = gluon_param.shape
assert (
shape_hf == shape_gluon
), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
UpperCAmelCase = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
UpperCAmelCase = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
UpperCAmelCase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
UpperCAmelCase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
UpperCAmelCase = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
UpperCAmelCase = hf_bort_model.bert.encoder.layer[i]
# self attention
UpperCAmelCase = layer.attention.self
UpperCAmelCase = check_and_map_params(
self_attn.key.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
UpperCAmelCase = check_and_map_params(
self_attn.key.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
UpperCAmelCase = check_and_map_params(
self_attn.query.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
UpperCAmelCase = check_and_map_params(
self_attn.query.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
UpperCAmelCase = check_and_map_params(
self_attn.value.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
UpperCAmelCase = check_and_map_params(
self_attn.value.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
UpperCAmelCase = layer.attention.output
UpperCAmelCase = check_and_map_params(
self_output.dense.bias , F'''encoder.transformer_cells.{i}.proj.bias''' )
UpperCAmelCase = check_and_map_params(
self_output.dense.weight , F'''encoder.transformer_cells.{i}.proj.weight''' )
UpperCAmelCase = check_and_map_params(
self_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
UpperCAmelCase = check_and_map_params(
self_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
UpperCAmelCase = layer.intermediate
UpperCAmelCase = check_and_map_params(
intermediate.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
UpperCAmelCase = check_and_map_params(
intermediate.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
UpperCAmelCase = layer.output
UpperCAmelCase = check_and_map_params(
bert_output.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
UpperCAmelCase = check_and_map_params(
bert_output.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
UpperCAmelCase = check_and_map_params(
bert_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
UpperCAmelCase = check_and_map_params(
bert_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
UpperCAmelCase = RobertaTokenizer.from_pretrained("roberta-base" )
UpperCAmelCase = tokenizer.encode_plus(__lowerCamelCase )["input_ids"]
# Get gluon output
UpperCAmelCase = mx.nd.array([input_ids] )
UpperCAmelCase = original_bort(inputs=__lowerCamelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(__lowerCamelCase )
UpperCAmelCase = BertModel.from_pretrained(__lowerCamelCase )
hf_bort_model.eval()
UpperCAmelCase = tokenizer.encode_plus(__lowerCamelCase , return_tensors="pt" )
UpperCAmelCase = hf_bort_model(**__lowerCamelCase )[0]
UpperCAmelCase = output_gluon[0].asnumpy()
UpperCAmelCase = output_hf[0].detach().numpy()
UpperCAmelCase = np.max(np.abs(hf_layer - gluon_layer ) ).item()
UpperCAmelCase = np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , __lowerCamelCase )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase =parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 333 |
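A toy version of the shape-checked parameter mapping used throughout the Bort conversion above (the function name and shapes here are illustrative, not part of the script):

import numpy as np
import torch
from torch import nn

def map_param(hf_param: torch.Tensor, source_array: np.ndarray) -> nn.Parameter:
    # refuse transposed or mis-sized tensors, mirroring check_and_map_params
    assert tuple(hf_param.shape) == source_array.shape, (
        f"shape mismatch: {source_array.shape} vs {tuple(hf_param.shape)}"
    )
    return nn.Parameter(torch.from_numpy(source_array).float())

mapped = map_param(torch.zeros(2, 3), np.ones((2, 3), dtype=np.float32))
assert mapped.shape == (2, 3)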
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_lowercase = False
try:
_lowercase = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase = None , _lowercase = [] ):
"""simple docstring"""
_lowerCAmelCase = 0
_lowerCAmelCase = choices
_lowerCAmelCase = prompt
if sys.platform == "win32":
_lowerCAmelCase = """*"""
else:
_lowerCAmelCase = """➔ """
def _lowercase ( self , _lowercase , _lowercase = "" ):
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _lowercase )
else:
forceWrite(self.choices[index] , _lowercase )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(_lowercase )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def _lowercase ( self , _lowercase , _lowercase = 1 ):
"""simple docstring"""
_lowerCAmelCase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_lowercase )
move_cursor(_lowercase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["""up"""] )
def _lowercase ( self ):
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP["""down"""] )
def _lowercase ( self ):
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["""newline"""] )
def _lowercase ( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , """DOWN""" )
return self.position
@input.mark(KEYMAP["""interrupt"""] )
def _lowercase ( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , """DOWN""" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(_lowercase )] for number in range(10 )] )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = int(chr(self.current_selection ) )
_lowerCAmelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _lowercase )
else:
return
else:
return
def _lowercase ( self , _lowercase = 0 ):
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , """\n""" )
if in_colab:
forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" )
else:
forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" )
_lowerCAmelCase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(_lowercase )
forceWrite("""\n""" )
move_cursor(len(self.choices ) - self.position , """UP""" )
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase = int(builtins.input() )
except ValueError:
_lowerCAmelCase = default_choice
else:
_lowerCAmelCase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , """UP""" )
clear_line()
self.write_choice(_lowercase , """\n""" )
return choice
| 5 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = ["model.decoder.embed_positions.weights"]
def A ( _lowerCamelCase ):
'''simple docstring'''
if "emb" in name:
_lowerCAmelCase : Optional[int] = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
_lowerCAmelCase : Optional[int] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
_lowerCAmelCase : Tuple = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
_lowerCAmelCase : Any = name.replace("linear1" , "fc1" )
if "linear2" in name:
_lowerCAmelCase : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
_lowerCAmelCase : List[str] = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
_lowerCAmelCase : int = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
_lowerCAmelCase : str = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
_lowerCAmelCase : str = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
_lowerCAmelCase : int = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_lowerCAmelCase : str = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = list(state_dict.keys() )
_lowerCAmelCase : str = {}
for key in keys:
_lowerCAmelCase : Tuple = state_dict.pop(__lowerCamelCase )
_lowerCAmelCase : int = rename_keys(__lowerCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
_lowerCAmelCase : List[str] = val[:hidden_size, :]
_lowerCAmelCase : Union[str, Any] = val[hidden_size : 2 * hidden_size, :]
_lowerCAmelCase : List[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_lowerCAmelCase : Optional[int] = val
else:
_lowerCAmelCase : Union[str, Any] = val
return state_dict, enc_dec_proj_state_dict
def A ( _lowerCamelCase ):
'''simple docstring'''
if checkpoint == "small":
# default config values
_lowerCAmelCase : Tuple = 1_024
_lowerCAmelCase : int = 24
_lowerCAmelCase : int = 16
elif checkpoint == "medium":
_lowerCAmelCase : int = 1_536
_lowerCAmelCase : Any = 48
_lowerCAmelCase : List[Any] = 24
elif checkpoint == "large":
_lowerCAmelCase : List[Any] = 2_048
_lowerCAmelCase : List[Any] = 48
_lowerCAmelCase : List[str] = 32
else:
raise ValueError(F"Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}." )
_lowerCAmelCase : Optional[int] = MusicgenDecoderConfig(
hidden_size=__lowerCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__lowerCamelCase , num_attention_heads=__lowerCamelCase , )
return config
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="cpu" ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = MusicGen.get_pretrained(__lowerCamelCase , device=__lowerCamelCase )
_lowerCAmelCase : Optional[int] = decoder_config_from_checkpoint(__lowerCamelCase )
_lowerCAmelCase : Optional[int] = fairseq_model.lm.state_dict()
_lowerCAmelCase , _lowerCAmelCase : Dict = rename_state_dict(
__lowerCamelCase , hidden_size=decoder_config.hidden_size )
_lowerCAmelCase : Optional[int] = TaEncoderModel.from_pretrained("t5-base" )
_lowerCAmelCase : List[str] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_lowerCAmelCase : str = MusicgenForCausalLM(__lowerCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = decoder.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(__lowerCamelCase ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
_lowerCAmelCase : Optional[Any] = MusicgenForConditionalGeneration(text_encoder=__lowerCamelCase , audio_encoder=__lowerCamelCase , decoder=__lowerCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__lowerCamelCase )
# check we can do a forward pass
_lowerCAmelCase : List[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_lowerCAmelCase : Tuple = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_lowerCAmelCase : str = model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("t5-base" )
_lowerCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
_lowerCAmelCase : Optional[int] = MusicgenProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
# set the appropriate bos/pad token ids
_lowerCAmelCase : Union[str, Any] = 2_048
_lowerCAmelCase : int = 2_048
# set other default generation config params
_lowerCAmelCase : Union[str, Any] = int(30 * audio_encoder.config.frame_rate )
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : str = 3.0
if pytorch_dump_folder is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__lowerCamelCase )
processor.push_to_hub(__lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
_snake_case = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
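# Example invocation (script name and dump folder are illustrative):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu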
| 500 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
_lowercase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def A (__lowerCamelCase :Optional[int] ):
_lowerCAmelCase = {}
with open(__lowerCamelCase , """r""" ) as file:
for line_number, line in enumerate(__lowerCamelCase ):
_lowerCAmelCase = line.strip()
if line:
_lowerCAmelCase = line.split()
_lowerCAmelCase = line_number
_lowerCAmelCase = words[0]
_lowerCAmelCase = value
return result
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Any , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any] , __lowerCamelCase :List[str] ):
for attribute in key.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = shape_pointer.shape
# let's reduce dimension
_lowerCAmelCase = value[0]
else:
_lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_lowerCAmelCase = value
elif weight_type == "weight_g":
_lowerCAmelCase = value
elif weight_type == "weight_v":
_lowerCAmelCase = value
elif weight_type == "bias":
_lowerCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = value
else:
_lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Dict , __lowerCamelCase :List[Any] , __lowerCamelCase :int ):
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = """.""".join([key, hf_param_name] )
else:
_lowerCAmelCase = key
_lowerCAmelCase = value if """lm_head""" in full_key else value[0]
_lowercase = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def A (__lowerCamelCase :Any , __lowerCamelCase :int , __lowerCamelCase :List[str]=None , __lowerCamelCase :List[Any]=None ):
_lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
_lowerCAmelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase = True
if "*" in mapped_key:
_lowerCAmelCase = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
_lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase = """weight_v"""
elif "bias" in name:
_lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase = """weight"""
else:
_lowerCAmelCase = None
if hf_dict is not None:
rename_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    return is_used
def A (__lowerCamelCase :Any , __lowerCamelCase :Dict , __lowerCamelCase :Dict ):
_lowerCAmelCase = []
_lowerCAmelCase = fairseq_model.state_dict()
_lowerCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
_lowerCAmelCase = True
else:
_lowerCAmelCase = load_wavaveca_layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def A (__lowerCamelCase :Tuple , __lowerCamelCase :Optional[int] , __lowerCamelCase :Any , __lowerCamelCase :List[Any] , __lowerCamelCase :List[Any] ):
_lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase = name.split(""".""" )
_lowerCAmelCase = int(items[0] )
_lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def A (__lowerCamelCase :List[str] , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any]=None , __lowerCamelCase :Union[str, Any]=None , __lowerCamelCase :str=True , __lowerCamelCase :str=False ):
if config_path is not None:
_lowerCAmelCase = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaConfig()
if is_seq_class:
_lowerCAmelCase = read_txt_into_dict(__lowerCamelCase )
_lowerCAmelCase = idalabel
_lowerCAmelCase = WavaVecaForSequenceClassification(__lowerCamelCase )
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
_lowerCAmelCase = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase = target_dict.pad_index
_lowerCAmelCase = target_dict.bos_index
_lowerCAmelCase = target_dict.eos_index
_lowerCAmelCase = len(target_dict.symbols )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase = 0
_lowerCAmelCase = 1
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
_lowerCAmelCase = True if config.feat_extract_norm == """layer""" else False
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
_lowerCAmelCase = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
_lowerCAmelCase = WavaVecaForCTC(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase = argparse.Namespace(task="""audio_pretraining""" )
_lowerCAmelCase = fairseq.tasks.setup_task(__lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
_lowerCAmelCase = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_lowercase = parser.parse_args()
_lowercase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
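# Example invocation for a fine-tuned CTC checkpoint (script name and paths are
# illustrative):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small_960h.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h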
| 5 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowerCamelCase__ ( a__ , a__ , a__ , a__ , a__ = None , a__ = None , a__ = None , ) -> Tuple:
"""simple docstring"""
if config_name_or_path is None:
_snake_case : List[Any] = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
if generator_tokenizer_name_or_path is None:
_snake_case : Union[str, Any] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
_snake_case : Tuple = question_encoder_name_or_path
_snake_case : List[Any] = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
# Save model.
_snake_case : Tuple = RagConfig.from_pretrained(__lowerCamelCase)
_snake_case : int = AutoConfig.from_pretrained(__lowerCamelCase)
_snake_case : List[str] = AutoConfig.from_pretrained(__lowerCamelCase)
_snake_case : Union[str, Any] = gen_config
_snake_case : Optional[Any] = question_encoder_config
_snake_case : int = model_class.from_pretrained_question_encoder_generator(
__lowerCamelCase , __lowerCamelCase , config=__lowerCamelCase)
rag_model.save_pretrained(__lowerCamelCase)
# Sanity check.
model_class.from_pretrained(__lowerCamelCase)
# Save tokenizers.
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(__lowerCamelCase)
gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/')
_snake_case : str = AutoTokenizer.from_pretrained(__lowerCamelCase)
question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/')
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
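# Example invocation (model identifiers follow the upstream RAG consolidation
# docs; the script name and destination path are illustrative):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint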
| 517 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[str] = '''decision_transformer'''
_lowercase : Optional[Any] = ['''past_key_values''']
_lowercase : str = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowercase=17 , _lowercase=4 , _lowercase=128 , _lowercase=4_096 , _lowercase=True , _lowercase=1 , _lowercase=1_024 , _lowercase=3 , _lowercase=1 , _lowercase=None , _lowercase="relu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1e-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=50_256 , _lowercase=50_256 , _lowercase=False , _lowercase=False , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = state_dim
_lowerCAmelCase = act_dim
_lowerCAmelCase = hidden_size
_lowerCAmelCase = max_ep_len
_lowerCAmelCase = action_tanh
_lowerCAmelCase = vocab_size
_lowerCAmelCase = n_positions
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = n_inner
_lowerCAmelCase = activation_function
_lowerCAmelCase = resid_pdrop
_lowerCAmelCase = embd_pdrop
_lowerCAmelCase = attn_pdrop
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scale_attn_weights
_lowerCAmelCase = use_cache
_lowerCAmelCase = scale_attn_by_inverse_layer_idx
_lowerCAmelCase = reorder_and_upcast_attn
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
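# Minimal usage sketch of the config class above (it mirrors
# `DecisionTransformerConfig` in upstream transformers; the state/action sizes
# below are the ones used for the gym Hopper environment):
#   config = UpperCAmelCase_(state_dim=11, act_dim=3, hidden_size=128)
#   assert config.max_position_embeddings == config.n_positions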
| 5 | 0 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
A_ : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _a :
'''simple docstring'''
UpperCAmelCase__: str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_SCREAMING_SNAKE_CASE )} )
UpperCAmelCase__: str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
UpperCAmelCase__: int = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__: int = field(
default=1_28 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
UpperCAmelCase__: int = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
UpperCAmelCase__: int = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
UpperCAmelCase__: bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCAmelCase__: bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
UpperCAmelCase__: float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
UpperCAmelCase__: int = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
UpperCAmelCase__: int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
UpperCAmelCase__: int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class _a (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__: str = '''train'''
UpperCAmelCase__: Union[str, Any] = '''dev'''
class _a (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__: SquadDataTrainingArguments
UpperCAmelCase__: List[SquadFeatures]
UpperCAmelCase__: Split
UpperCAmelCase__: bool
def __init__( self , A__ , A__ , A__ = None , A__ = Split.train , A__ = False , A__ = None , A__ = "pt" , ):
A__ : List[Any] = args
A__ : List[str] = is_language_sensitive
A__ : Optional[int] = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(_lowercase , _lowercase ):
try:
A__ : List[str] = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
A__ : Tuple = mode
# Load data features from cache or dataset file
A__ : Union[str, Any] = """v2""" if args.version_2_with_negative else """v1"""
A__ : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A__ : Any = cached_features_file + """.lock"""
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not args.overwrite_cache:
A__ : Union[str, Any] = time.time()
A__ : int = torch.load(_lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
A__ : Optional[int] = self.old_features["""features"""]
A__ : Optional[Any] = self.old_features.get("""dataset""" , _lowercase )
A__ : List[Any] = self.old_features.get("""examples""" , _lowercase )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
""" future run""" )
else:
if mode == Split.dev:
A__ : Optional[Any] = self.processor.get_dev_examples(args.data_dir )
else:
A__ : int = self.processor.get_train_examples(args.data_dir )
A__ , A__ : int = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
A__ : Union[str, Any] = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , _lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ):
return len(self.features )
def __getitem__( self , A__ ):
A__ : List[Any] = self.features[i]
A__ : str = torch.tensor(feature.input_ids , dtype=torch.long )
A__ : Optional[int] = torch.tensor(feature.attention_mask , dtype=torch.long )
A__ : Optional[Any] = torch.tensor(feature.token_type_ids , dtype=torch.long )
A__ : Any = torch.tensor(feature.cls_index , dtype=torch.long )
A__ : str = torch.tensor(feature.p_mask , dtype=torch.float )
A__ : Optional[int] = torch.tensor(feature.is_impossible , dtype=torch.float )
A__ : Tuple = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
A__ : int = torch.tensor(feature.start_position , dtype=torch.long )
A__ : Union[str, Any] = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
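# Typical construction sketch (illustrative): instantiate the data-args
# dataclass above with model_type="bert" and a SQuAD-formatted data_dir, load
# a tokenizer with AutoTokenizer.from_pretrained("bert-base-uncased"), and pass
# both to the Dataset subclass above with mode=Split.train; features are then
# built once and cached next to the data.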
| 456 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
_lowercase = None
_lowercase = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
_lowercase = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def A (__lowerCamelCase :int , __lowerCamelCase :Optional[Any]=1 , __lowerCamelCase :List[Any]=256 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
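# Sanity check of the rounding above, using the 7B row of the intermediate-size
# table at the top of this file: with n = 4096, ffn_dim_multiplier = 1 and
# multiple_of = 256, int(8 * 4096 / 3) = 10922, and rounding up to the next
# multiple of 256 gives 43 * 256 = 11008.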
def A (__lowerCamelCase :Any ):
with open(__lowerCamelCase , """r""" ) as f:
return json.load(__lowerCamelCase )
def A (__lowerCamelCase :List[Any] , __lowerCamelCase :int ):
with open(__lowerCamelCase , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Optional[Any] , __lowerCamelCase :Tuple=True ):
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """tmp""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = read_json(os.path.join(__lowerCamelCase , """params.json""" ) )
_lowerCAmelCase = NUM_SHARDS[model_size]
_lowerCAmelCase = params["""n_layers"""]
_lowerCAmelCase = params["""n_heads"""]
_lowerCAmelCase = n_heads // num_shards
_lowerCAmelCase = params["""dim"""]
_lowerCAmelCase = dim // n_heads
_lowerCAmelCase = 10_000.0
_lowerCAmelCase = 1.0 / (base ** (torch.arange(0 , __lowerCamelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase = params["""n_kv_heads"""] # for GQA / MQA
_lowerCAmelCase = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase = n_heads
_lowerCAmelCase = n_heads_per_shard
_lowerCAmelCase = dim
# permute for sliced rotary
def permute(__lowerCamelCase :Optional[int] , __lowerCamelCase :str=n_heads , __lowerCamelCase :str=dim , __lowerCamelCase :List[Any]=dim ):
return w.view(__lowerCamelCase , dima // n_heads // 2 , 2 , __lowerCamelCase ).transpose(1 , 2 ).reshape(__lowerCamelCase , __lowerCamelCase )
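    # What `permute` does: a 2-D weight is viewed as
    # (n_heads, n_rows // n_heads // 2, 2, n_cols), the two middle axes are
    # swapped, and the result is flattened back to its original shape. This
    # converts the checkpoint's interleaved rotary (even/odd) layout into the
    # sliced half/half layout that the HF Llama attention expects.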
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase = torch.load(os.path.join(__lowerCamelCase , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
_lowerCAmelCase = [
torch.load(os.path.join(__lowerCamelCase , f'consolidated.{i:02d}.pth' ) , map_location="""cpu""" )
for i in range(__lowerCamelCase )
]
_lowerCAmelCase = 0
_lowerCAmelCase = {"""weight_map""": {}}
for layer_i in range(__lowerCamelCase ):
_lowerCAmelCase = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) )
_lowerCAmelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
_lowerCAmelCase = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(__lowerCamelCase )] , dim=1 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__lowerCamelCase )] , dim=0 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__lowerCamelCase )] , dim=1 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__lowerCamelCase )] , dim=0 )
_lowerCAmelCase = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
_lowerCAmelCase = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
_lowerCAmelCase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__lowerCamelCase )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__lowerCamelCase )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
# Write configs
_lowerCAmelCase = {"""total_size""": param_count * 2}
write_json(__lowerCamelCase , os.path.join(__lowerCamelCase , """pytorch_model.bin.index.json""" ) )
_lowerCAmelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_lowerCAmelCase = params["""multiple_of"""] if """multiple_of""" in params else 256
_lowerCAmelCase = LlamaConfig(
hidden_size=__lowerCamelCase , intermediate_size=compute_intermediate_size(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=__lowerCamelCase , )
config.save_pretrained(__lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
_lowerCAmelCase = LlamaForCausalLM.from_pretrained(__lowerCamelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=__lowerCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__lowerCamelCase , safe_serialization=__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Union[str, Any] ):
# Initialize the tokenizer based on the `spm` model
_lowerCAmelCase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase = tokenizer_class(__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
def A ():
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=__lowerCamelCase , help="""Whether or not to save using `safetensors`.""" )
_lowerCAmelCase = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , __lowerCamelCase )
if __name__ == "__main__":
main()
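# Example invocation (paths are illustrative):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir ./llama-7b-hf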
| 5 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCamelCase = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
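# With the lazy-module pattern above, heavy submodules are only imported on
# first attribute access, e.g. (illustrative):
#   from transformers import RoCBertModel  # resolves through the lazy module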
| 608 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = (DDPMScheduler,)
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowercase )
return config
def _lowercase ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def _lowercase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowercase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowercase ):
if i == len(_lowercase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowercase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowercase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowercase )
with self.assertRaises(_lowercase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _lowercase , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_lowercase )
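# A standalone sketch of the reverse-diffusion loop the tests above exercise.
# The "model" here is a stand-in that predicts zero noise rather than a trained
# UNet, so the result is not a meaningful sample; it only shows the scheduler API.
def _denoising_loop_sketch():
    scheduler = DDPMScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(50)
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8, generator=generator)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    return sample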
| 5 | 0 |
'''simple docstring'''
from collections.abc import Callable
def bisection( function : Callable[[float], float] , a : float , b : float ):
    """simple docstring"""
    start : float = a
    end : float = b
    if function(a ) == 0: # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ): # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval." )
    else:
        mid : float = start + (end - start) / 2.0
        while abs(start - mid ) > 1_0**-7: # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f( x : float ):
    """simple docstring"""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
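# For f(x) = x**3 - 2*x - 5 the real root is approximately 2.0945515, so the
# print above emits a value within the 1e-7 tolerance of that root.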
| 211 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
_lowercase = logging.get_logger(__name__)
_lowercase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
_lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_SCREAMING_SNAKE_CASE )} )
_lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
_lowercase : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowercase : int = field(
default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
_lowercase : int = field(
default=6_4 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
_lowercase : int = field(
default=3_0 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
_lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
_lowercase : float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_lowercase : int = field(
default=2_0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_lowercase : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
_lowercase : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''train'''
_lowercase : Union[str, Any] = '''dev'''
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : SquadDataTrainingArguments
_lowercase : List[SquadFeatures]
_lowercase : Split
_lowercase : bool
def __init__( self , _lowercase , _lowercase , _lowercase = None , _lowercase = Split.train , _lowercase = False , _lowercase = None , _lowercase = "pt" , ):
"""simple docstring"""
_lowerCAmelCase = args
_lowerCAmelCase = is_language_sensitive
_lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(_lowercase , _lowercase ):
try:
_lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
_lowerCAmelCase = mode
# Load data features from cache or dataset file
_lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
_lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not args.overwrite_cache:
_lowerCAmelCase = time.time()
_lowerCAmelCase = torch.load(_lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCAmelCase = self.old_features["""features"""]
_lowerCAmelCase = self.old_features.get("""dataset""" , _lowercase )
_lowerCAmelCase = self.old_features.get("""examples""" , _lowercase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
_lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
_lowerCAmelCase , _lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
_lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , _lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.features[i]
_lowerCAmelCase = torch.tensor(feature.input_ids , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.attention_mask , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.token_type_ids , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.cls_index , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.p_mask , dtype=torch.float )
_lowerCAmelCase = torch.tensor(feature.is_impossible , dtype=torch.float )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCAmelCase = torch.tensor(feature.start_position , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
| 5 | 0 |
from datetime import datetime
import requests
def UpperCamelCase( __UpperCamelCase : str ):
lowerCAmelCase_ : str = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
lowerCAmelCase_ : Union[str, Any] = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(__lowerCamelCase ).content
if __name__ == "__main__":
A__ : Dict = input('''Enter Video/IGTV url: ''').strip()
A__ : List[str] = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 171 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''dpr'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=0 , _lowercase="absolute" , _lowercase = 0 , **_lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowercase , **_lowercase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = projection_dim
_lowerCAmelCase = position_embedding_type
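# Minimal usage sketch of the config class above (it mirrors `DPRConfig` in
# upstream transformers; a non-zero projection_dim adds a projection layer on
# top of the pooled output):
#   config = UpperCAmelCase_(projection_dim=128)
#   assert config.model_type == "dpr"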
| 5 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase__ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def snake_case__ ( self , SCREAMING_SNAKE_CASE__=0 ) -> Union[str, Any]:
A__ = floats_tensor((1, 3, 128, 128) , rng=random.Random(_lowercase ) )
A__ = torch.manual_seed(_lowercase )
A__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
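
# Editor's note (not part of the original file): the @nightly tests above need a
# CUDA-enabled onnxruntime build; with diffusers' test markers they are usually
# selected via an environment flag, e.g. (assumed invocation):
#   RUN_NIGHTLY=1 python -m pytest -k "OnnxStableDiffusionUpscale"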
| 104 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_lowercase = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_lowercase = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_lowercase = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 5 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
__lowercase = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
__lowercase = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
| 203 |
'''simple docstring'''
def triangle_number_generator():
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
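
# Editor's sanity check (not part of the original file): 28 = 2^2 * 7 has
# (2 + 1) * (1 + 1) = 6 divisors, and the triangle number 28 is the first one
# with more than 5 divisors (the worked example from Project Euler problem 12).
if __name__ == "__main__":
    assert count_divisors(28) == 6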
| 5 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=10_24,
        encoder_layers=12,
        encoder_ffn_dim=40_96,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=40_96,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=10_24,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=1_00,
        prompt_mid_dim=8_00,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 631 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 5 | 0 |
def perfect(number):
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 101 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 5 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
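
# --- Editor's addition (not part of the original file): ONNX axes sketch ---
# Shows the dynamic axes the export would use for the default task.
if __name__ == "__main__":
    print(IBertOnnxConfig(IBertConfig()).inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...)])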
| 333 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
_lowercase = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
_lowercase = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
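
# Editor's note (not part of the original file): with `_LazyModule`, the heavy
# torch-dependent modules listed above are only imported when first accessed, e.g.:
#   from transformers.models.time_series_transformer import TimeSeriesTransformerConfig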
| 500 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 5 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    # NOTE: the mangled original had lost the attribute names on the left-hand
    # sides below; they were reconstructed from the public DPTConfig attributes.
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    # NOTE: the destination keys below were reconstructed from the standard
    # q/k/v split used by the DPT model; the mangled original had lost them.
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
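
# Editor's usage note (not part of the original file): a typical invocation,
# keeping the default MiDaS checkpoint URL declared above (the script filename
# is an assumption):
#   python convert_dpt_hybrid_to_pytorch.py \
#       --pytorch_dump_folder_path ./dpt-large --model_name dpt-large --show_prediction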
| 517 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 5 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    # Solve matrix * x = vector by Gaussian elimination with partial pivoting.
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    # Return the lowest-degree polynomial (as a callable) passing through the
    # points (1, y_list[0]), (2, y_list[1]), ...
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
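
# Editor's check (not part of the original file): for u(n) = n^3 the sum of the
# first incorrect terms with order 3 is 1 + 15 + 58 = 74, the worked example
# from Project Euler problem 101.
if __name__ == "__main__":
    assert solution(lambda n: n**3, 3) == 74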
| 456 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 0 |
"""simple docstring"""
import os
def solution():
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score
if __name__ == "__main__":
print(solution())
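
# Editor's worked example (not part of the original file): "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53; at position 938 it would contribute 938 * 53 = 49714,
# the example from Project Euler problem 22.
if __name__ == "__main__":
    assert sum(ord(letter) - 64 for letter in "COLIN") == 53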
| 608 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
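
# Editor's usage sketch (not part of the original file); the checkpoint name is
# illustrative and a download is required, so the call is left commented out.
# from transformers import pipeline
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier(audio, candidate_labels=["Sound of a bird", "Sound of a dog"])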
| 5 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 211 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size=1,
        sampling_rate=24_000,
        padding_value=0.0,
        chunk_length_s=None,
        overlap=None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio,
        padding=None,
        truncation=False,
        max_length=None,
        return_tensors=None,
        sampling_rate=None,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
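
# --- Editor's addition (not part of the original file): chunking sketch ---
# With chunk_length_s=1.0 and overlap=0.5 at 24 kHz, chunk_length is 24000 and
# chunk_stride is 12000, so a 30000-sample clip pads up to 48000 samples.
if __name__ == "__main__":
    fe = EncodecFeatureExtractor(chunk_length_s=1.0, overlap=0.5)
    batch = fe([np.zeros(30_000, dtype=np.float32)], sampling_rate=24_000, return_tensors="np")
    print(batch["input_values"].shape)  # expected: (1, 1, 48000)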
| 5 | 0 |
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
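
# Editor's sketch (not part of the original file): a test consuming the fixture
# above; pytest injects `dataset_loading_script_dir` by name. Left commented out
# because it loads data.
# def test_load_dummy_dataset(dataset_loading_script_dir):
#     import datasets
#     ds = datasets.load_dataset(dataset_loading_script_dir, split="train")
#     assert len(ds) > 0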
| 171 |
'''simple docstring'''
_lowercase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowercase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowercase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 5 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
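
# --- Editor's addition (not part of the original file): column-mapping demo ---
if __name__ == "__main__":
    template = QuestionAnsweringExtractive(question_column="q", context_column="ctx")
    assert template.column_mapping == {"q": "question", "ctx": "context", "answers": "answers"}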
| 104 |
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
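
# Editor's example (not part of the original file): the classic sample case —
# travel days [1, 4, 6, 7, 8, 20] with costs [2, 7, 15] has minimum cost 11
# (two 1-day passes around a 7-day pass).
if __name__ == "__main__":
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11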
| 5 | 0 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment the count of the corresponding character
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
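
# Editor's check (not part of the original file): a classic anagram pair.
# assert check_anagrams("Silent", "Listen") and not check_anagrams("abc", "abd")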
| 203 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    # NOTE: the variable names below were reconstructed from the keyword
    # arguments of the ConvNextConfig/UperNetConfig calls at the end.
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Dict , __lowerCamelCase :Tuple ):
_lowerCAmelCase = dct.pop(__lowerCamelCase )
_lowerCAmelCase = val
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Optional[Any] , __lowerCamelCase :Any ):
_lowerCAmelCase = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
_lowerCAmelCase = model_name_to_url[model_name]
_lowerCAmelCase = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )["""state_dict"""]
_lowerCAmelCase = get_upernet_config(__lowerCamelCase )
_lowerCAmelCase = UperNetForSemanticSegmentation(__lowerCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCAmelCase = state_dict.pop(__lowerCamelCase )
if "bn" in key:
_lowerCAmelCase = key.replace("""bn""" , """batch_norm""" )
_lowerCAmelCase = val
# rename keys
_lowerCAmelCase = create_rename_keys(__lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
# verify on image
_lowerCAmelCase = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
_lowerCAmelCase = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("""RGB""" )
_lowerCAmelCase = SegformerImageProcessor()
_lowerCAmelCase = processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
_lowerCAmelCase = model(__lowerCamelCase )
if model_name == "upernet-convnext-tiny":
_lowerCAmelCase = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
_lowerCAmelCase = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
_lowerCAmelCase = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
_lowerCAmelCase = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
_lowerCAmelCase = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowercase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 5 | 0 |
import os
def lowercase ( a = "input.txt" ):
'''simple docstring'''
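    # dynamic programming over columns: a path may enter a cell from the left,
    # from above or from below, so each column gets a left-to-right pass
    # followed by a downward and an upward relaxation pass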
with open(os.path.join(os.path.dirname(__lowerCamelCase ) , __lowerCamelCase ) ) as input_file:
SCREAMING_SNAKE_CASE_ :int = [
            [int(element ) for element in line.split("," )]
for line in input_file.readlines()
]
SCREAMING_SNAKE_CASE_ :str = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ :Dict = len(matrix[0] )
SCREAMING_SNAKE_CASE_ :str = [[-1 for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ :List[Any] = matrix[i][0]
for j in range(1 , __lowerCamelCase ):
for i in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ :Dict = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ :List[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
SCREAMING_SNAKE_CASE_ :List[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 631 |
'''simple docstring'''
from itertools import product
def A (__lowerCamelCase :int , __lowerCamelCase :int ):
_lowerCAmelCase = sides_number
_lowerCAmelCase = max_face_number * dice_number
_lowerCAmelCase = [0] * (max_total + 1)
_lowerCAmelCase = 1
_lowerCAmelCase = range(__lowerCamelCase , max_face_number + 1 )
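    # enumerate every ordered roll of the dice and tally how often each total occurs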
for dice_numbers in product(__lowerCamelCase , repeat=__lowerCamelCase ):
        _lowerCAmelCase = sum(dice_numbers )
totals_frequencies[total] += 1
return totals_frequencies
def A ():
_lowerCAmelCase = total_frequency_distribution(
sides_number=4 , dice_number=9 )
_lowerCAmelCase = total_frequency_distribution(
sides_number=6 , dice_number=6 )
_lowerCAmelCase = 0
_lowerCAmelCase = 9
_lowerCAmelCase = 4 * 9
_lowerCAmelCase = 6
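    # Peter wins whenever his total strictly exceeds Colin's, so for each of
    # Peter's totals we add up the frequencies of all smaller Colin totals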
for peter_total in range(__lowerCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_lowerCAmelCase = (4**9) * (6**6)
_lowerCAmelCase = peter_wins_count / total_games_number
_lowerCAmelCase = round(__lowerCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 5 | 0 |
import math
import random
def a__ ( A__, A__ = False ):
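    # with deriv=True the argument is assumed to already be sigmoid(x),
    # so value * (1 - value) gives the sigmoid's derivative at that point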
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCAmelCase__ : List[str] =0.0_2
def a__ ( A__, A__ ):
SCREAMING_SNAKE_CASE_ : Any = float(2 * (random.randint(1, 1_0_0 )) - 1 )
for _ in range(__lowerCamelCase ):
# Forward propagation
SCREAMING_SNAKE_CASE_ : List[Any] = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
SCREAMING_SNAKE_CASE_ : List[str] = (expected / 1_0_0) - layer_a
# Error delta
SCREAMING_SNAKE_CASE_ : List[Any] = layer_1_error * sigmoid_function(__lowerCamelCase, __lowerCamelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_0_0
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ : Any =int(input('Expected value: '))
lowerCAmelCase__ : int =int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
| 101 |
'''simple docstring'''
from manim import *
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""CPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(1 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""GPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.align_to(_lowercase , _lowercase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""Model""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) , )
_lowerCAmelCase = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
_lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=2.5 ) , Write(_lowercase ) , Write(_lowercase ) )
self.add(_lowercase )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, rect in enumerate(_lowercase ):
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
cpu_target.move_to(_lowercase )
cpu_target.generate_target()
_lowerCAmelCase = 0.46 / 4
_lowerCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_lowercase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowercase , buff=0.0 )
cpu_targs.append(_lowercase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowercase ) )
second_animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
self.play(*_lowercase )
self.play(*_lowercase )
self.wait()
| 5 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __magic_name__ ( _SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = '''sew-d'''
def __init__( self : List[str] ,__SCREAMING_SNAKE_CASE : Tuple=3_2 ,__SCREAMING_SNAKE_CASE : List[Any]=7_6_8 ,__SCREAMING_SNAKE_CASE : Tuple=1_2 ,__SCREAMING_SNAKE_CASE : Optional[Any]=1_2 ,__SCREAMING_SNAKE_CASE : int=3_0_7_2 ,__SCREAMING_SNAKE_CASE : List[str]=2 ,__SCREAMING_SNAKE_CASE : Optional[Any]=5_1_2 ,__SCREAMING_SNAKE_CASE : List[str]=2_5_6 ,__SCREAMING_SNAKE_CASE : Dict=True ,__SCREAMING_SNAKE_CASE : Tuple=True ,__SCREAMING_SNAKE_CASE : str=("p2c", "c2p") ,__SCREAMING_SNAKE_CASE : Optional[Any]="layer_norm" ,__SCREAMING_SNAKE_CASE : Dict="gelu_python" ,__SCREAMING_SNAKE_CASE : Dict=0.1 ,__SCREAMING_SNAKE_CASE : Optional[Any]=0.1 ,__SCREAMING_SNAKE_CASE : Optional[Any]=0.1 ,__SCREAMING_SNAKE_CASE : Any=0.0 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.1 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.02 ,__SCREAMING_SNAKE_CASE : Any=1e-7 ,__SCREAMING_SNAKE_CASE : Optional[Any]=1e-5 ,__SCREAMING_SNAKE_CASE : List[Any]="group" ,__SCREAMING_SNAKE_CASE : List[Any]="gelu" ,__SCREAMING_SNAKE_CASE : Optional[Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) ,__SCREAMING_SNAKE_CASE : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,__SCREAMING_SNAKE_CASE : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,__SCREAMING_SNAKE_CASE : List[Any]=False ,__SCREAMING_SNAKE_CASE : int=1_2_8 ,__SCREAMING_SNAKE_CASE : List[Any]=1_6 ,__SCREAMING_SNAKE_CASE : Optional[Any]=True ,__SCREAMING_SNAKE_CASE : List[str]=0.05 ,__SCREAMING_SNAKE_CASE : List[str]=1_0 ,__SCREAMING_SNAKE_CASE : Optional[Any]=2 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 ,__SCREAMING_SNAKE_CASE : str=1_0 ,__SCREAMING_SNAKE_CASE : Tuple=0 ,__SCREAMING_SNAKE_CASE : str="mean" ,__SCREAMING_SNAKE_CASE : List[Any]=False ,__SCREAMING_SNAKE_CASE : Dict=False ,__SCREAMING_SNAKE_CASE : Dict=2_5_6 ,__SCREAMING_SNAKE_CASE : Optional[Any]=0 ,__SCREAMING_SNAKE_CASE : int=1 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=2 ,**__SCREAMING_SNAKE_CASE : List[Any] ,):
super().__init__(**_lowercase ,pad_token_id=_lowercase ,bos_token_id=_lowercase ,eos_token_id=_lowercase )
UpperCAmelCase = hidden_size
UpperCAmelCase = feat_extract_norm
UpperCAmelCase = feat_extract_activation
UpperCAmelCase = list(_lowercase )
UpperCAmelCase = list(_lowercase )
UpperCAmelCase = list(_lowercase )
UpperCAmelCase = conv_bias
UpperCAmelCase = num_conv_pos_embeddings
UpperCAmelCase = num_conv_pos_embedding_groups
UpperCAmelCase = len(self.conv_dim )
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = intermediate_size
UpperCAmelCase = squeeze_factor
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = position_buckets
UpperCAmelCase = share_att_key
UpperCAmelCase = relative_attention
UpperCAmelCase = norm_rel_ebd
UpperCAmelCase = list(_lowercase )
UpperCAmelCase = hidden_act
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = feat_proj_dropout
UpperCAmelCase = final_dropout
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = feature_layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = apply_spec_augment
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
# ctc loss
UpperCAmelCase = ctc_loss_reduction
UpperCAmelCase = ctc_zero_infinity
# sequence classification
UpperCAmelCase = use_weighted_layer_sum
UpperCAmelCase = classifier_proj_size
@property
def _UpperCAmelCase ( self : Optional[Any] ):
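        # total downsampling factor of the feature extractor
        # (the product of all convolutional strides)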
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 333 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_lowercase = False
try:
_lowercase = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase = None , _lowercase = [] ):
"""simple docstring"""
_lowerCAmelCase = 0
_lowerCAmelCase = choices
_lowerCAmelCase = prompt
if sys.platform == "win32":
_lowerCAmelCase = """*"""
else:
_lowerCAmelCase = """➔ """
def _lowercase ( self , _lowercase , _lowercase = "" ):
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _lowercase )
else:
forceWrite(self.choices[index] , _lowercase )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(_lowercase )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def _lowercase ( self , _lowercase , _lowercase = 1 ):
"""simple docstring"""
_lowerCAmelCase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_lowercase )
move_cursor(_lowercase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["""up"""] )
def _lowercase ( self ):
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP["""down"""] )
def _lowercase ( self ):
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["""newline"""] )
def _lowercase ( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , """DOWN""" )
return self.position
@input.mark(KEYMAP["""interrupt"""] )
def _lowercase ( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , """DOWN""" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = int(chr(self.current_selection ) )
_lowerCAmelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _lowercase )
else:
return
else:
return
def _lowercase ( self , _lowercase = 0 ):
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , """\n""" )
if in_colab:
forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" )
else:
forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" )
_lowerCAmelCase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(_lowercase )
forceWrite("""\n""" )
move_cursor(len(self.choices ) - self.position , """UP""" )
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase = int(builtins.input() )
except ValueError:
_lowerCAmelCase = default_choice
else:
_lowerCAmelCase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , """UP""" )
clear_line()
self.write_choice(_lowercase , """\n""" )
return choice
| 5 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE):
lowerCamelCase__ = '''Wav2Vec2FeatureExtractor'''
lowerCamelCase__ = '''AutoTokenizer'''
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__(_lowercase, _lowercase)
_lowerCAmelCase : List[str] = self.feature_extractor
_lowerCAmelCase : Union[str, Any] = False
@classmethod
def snake_case__ ( cls, __a, **__a):
'''simple docstring'''
try:
return super().from_pretrained(_lowercase, **_lowercase)
except OSError:
warnings.warn(
f"Loading a tokenizer inside {cls.__name__} from a config that does not"
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: ", _lowercase, )
_lowerCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(_lowercase, **_lowercase)
_lowerCAmelCase : List[Any] = WavaVecaCTCTokenizer.from_pretrained(_lowercase, **_lowercase)
return cls(feature_extractor=_lowercase, tokenizer=_lowercase)
def __call__( self, *__a, **__a):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_lowercase, **_lowercase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
_lowerCAmelCase : List[str] = kwargs.pop("raw_speech")
else:
_lowerCAmelCase : Optional[Any] = kwargs.pop("audio", _lowercase)
_lowerCAmelCase : Any = kwargs.pop("sampling_rate", _lowercase)
_lowerCAmelCase : Dict = kwargs.pop("text", _lowercase)
if len(_lowercase) > 0:
_lowerCAmelCase : Dict = args[0]
_lowerCAmelCase : List[str] = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
_lowerCAmelCase : Tuple = self.feature_extractor(_lowercase, *_lowercase, sampling_rate=_lowercase, **_lowercase)
if text is not None:
_lowerCAmelCase : Optional[int] = self.tokenizer(_lowercase, **_lowercase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCAmelCase : Tuple = encodings["input_ids"]
return inputs
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*_lowercase, **_lowercase)
_lowerCAmelCase : Any = kwargs.pop("input_features", _lowercase)
_lowerCAmelCase : List[Any] = kwargs.pop("labels", _lowercase)
if len(_lowercase) > 0:
_lowerCAmelCase : Union[str, Any] = args[0]
_lowerCAmelCase : Optional[int] = args[1:]
if input_features is not None:
_lowerCAmelCase : Any = self.feature_extractor.pad(_lowercase, *_lowercase, **_lowercase)
if labels is not None:
_lowerCAmelCase : Optional[Any] = self.tokenizer.pad(_lowercase, **_lowercase)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_lowerCAmelCase : List[str] = labels["input_ids"]
return input_features
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase, **_lowercase)
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
return self.tokenizer.decode(*_lowercase, **_lowercase)
@contextmanager
def snake_case__ ( self):
'''simple docstring'''
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
_lowerCAmelCase : str = True
_lowerCAmelCase : List[str] = self.tokenizer
yield
_lowerCAmelCase : Tuple = self.feature_extractor
_lowerCAmelCase : Optional[Any] = False
| 500 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
_lowercase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def A (__lowerCamelCase :Optional[int] ):
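    # one label per non-empty line: the line number serves as the class id and
    # the first token as the label name (consumed below as the model's id2label)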
_lowerCAmelCase = {}
with open(__lowerCamelCase , """r""" ) as file:
for line_number, line in enumerate(__lowerCamelCase ):
_lowerCAmelCase = line.strip()
if line:
_lowerCAmelCase = line.split()
_lowerCAmelCase = line_number
_lowerCAmelCase = words[0]
_lowerCAmelCase = value
return result
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Any , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any] , __lowerCamelCase :List[str] ):
for attribute in key.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = shape_pointer.shape
# let's reduce dimension
_lowerCAmelCase = value[0]
else:
_lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_lowerCAmelCase = value
elif weight_type == "weight_g":
_lowerCAmelCase = value
elif weight_type == "weight_v":
_lowerCAmelCase = value
elif weight_type == "bias":
_lowerCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = value
else:
_lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Dict , __lowerCamelCase :List[Any] , __lowerCamelCase :int ):
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = """.""".join([key, hf_param_name] )
else:
_lowerCAmelCase = key
_lowerCAmelCase = value if """lm_head""" in full_key else value[0]
_lowercase = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def A (__lowerCamelCase :Any , __lowerCamelCase :int , __lowerCamelCase :List[str]=None , __lowerCamelCase :List[Any]=None ):
_lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
_lowerCAmelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase = True
if "*" in mapped_key:
_lowerCAmelCase = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
_lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase = """weight_v"""
elif "bias" in name:
_lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase = """weight"""
else:
_lowerCAmelCase = None
if hf_dict is not None:
rename_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return is_used
return is_used
def A (__lowerCamelCase :Any , __lowerCamelCase :Dict , __lowerCamelCase :Dict ):
_lowerCAmelCase = []
_lowerCAmelCase = fairseq_model.state_dict()
_lowerCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
_lowerCAmelCase = True
else:
_lowerCAmelCase = load_wavaveca_layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def A (__lowerCamelCase :Tuple , __lowerCamelCase :Optional[int] , __lowerCamelCase :Any , __lowerCamelCase :List[Any] , __lowerCamelCase :List[Any] ):
_lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase = name.split(""".""" )
_lowerCAmelCase = int(items[0] )
_lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def A (__lowerCamelCase :List[str] , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any]=None , __lowerCamelCase :Union[str, Any]=None , __lowerCamelCase :str=True , __lowerCamelCase :str=False ):
if config_path is not None:
_lowerCAmelCase = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaConfig()
if is_seq_class:
_lowerCAmelCase = read_txt_into_dict(__lowerCamelCase )
_lowerCAmelCase = idalabel
_lowerCAmelCase = WavaVecaForSequenceClassification(__lowerCamelCase )
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
_lowerCAmelCase = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase = target_dict.pad_index
_lowerCAmelCase = target_dict.bos_index
_lowerCAmelCase = target_dict.eos_index
_lowerCAmelCase = len(target_dict.symbols )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase = 0
_lowerCAmelCase = 1
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
_lowerCAmelCase = True if config.feat_extract_norm == """layer""" else False
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
_lowerCAmelCase = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
_lowerCAmelCase = WavaVecaForCTC(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase = argparse.Namespace(task="""audio_pretraining""" )
_lowerCAmelCase = fairseq.tasks.setup_task(__lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
_lowerCAmelCase = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_lowercase = parser.parse_args()
_lowercase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 5 | 0 |
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
SCREAMING_SNAKE_CASE_ = [{"type": "code", "content": INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 517 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[str] = '''decision_transformer'''
_lowercase : Optional[Any] = ['''past_key_values''']
_lowercase : str = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowercase=17 , _lowercase=4 , _lowercase=128 , _lowercase=4_096 , _lowercase=True , _lowercase=1 , _lowercase=1_024 , _lowercase=3 , _lowercase=1 , _lowercase=None , _lowercase="relu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1e-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=50_256 , _lowercase=50_256 , _lowercase=False , _lowercase=False , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = state_dim
_lowerCAmelCase = act_dim
_lowerCAmelCase = hidden_size
_lowerCAmelCase = max_ep_len
_lowerCAmelCase = action_tanh
_lowerCAmelCase = vocab_size
_lowerCAmelCase = n_positions
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = n_inner
_lowerCAmelCase = activation_function
_lowerCAmelCase = resid_pdrop
_lowerCAmelCase = embd_pdrop
_lowerCAmelCase = attn_pdrop
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scale_attn_weights
_lowerCAmelCase = use_cache
_lowerCAmelCase = scale_attn_by_inverse_layer_idx
_lowerCAmelCase = reorder_and_upcast_attn
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
| 5 | 0 |
from math import pi, sqrt, tan
def UpperCamelCase (lowercase_: float ) -> List[str]:
if side_length < 0:
raise ValueError("""surface_area_cube() only accepts non-negative values""" )
return 6 * side_length**2
def UpperCamelCase (lowercase_: float , lowercase_: float , lowercase_: float ) -> Any:
if length < 0 or breadth < 0 or height < 0:
raise ValueError("""surface_area_cuboid() only accepts non-negative values""" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def UpperCamelCase (lowercase_: float ) -> List[str]:
if radius < 0:
raise ValueError("""surface_area_sphere() only accepts non-negative values""" )
return 4 * pi * radius**2
def UpperCamelCase (lowercase_: float ) -> Optional[int]:
if radius < 0:
raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" )
return 3 * pi * radius**2
def UpperCamelCase (lowercase_: float , lowercase_: float ) -> int:
if radius < 0 or height < 0:
raise ValueError("""surface_area_cone() only accepts non-negative values""" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def UpperCamelCase (lowercase_: float , lowercase_: float , lowercase_: float ) -> Any:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"""surface_area_conical_frustum() only accepts non-negative values""" )
A__ : Dict = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def UpperCamelCase (lowercase_: float , lowercase_: float ) -> Any:
if radius < 0 or height < 0:
raise ValueError("""surface_area_cylinder() only accepts non-negative values""" )
return 2 * pi * radius * (height + radius)
def UpperCamelCase (lowercase_: float , lowercase_: float ) -> List[str]:
if torus_radius < 0 or tube_radius < 0:
raise ValueError("""surface_area_torus() only accepts non-negative values""" )
if torus_radius < tube_radius:
raise ValueError(
"""surface_area_torus() does not support spindle or self intersecting tori""" )
return 4 * pow(__lowerCamelCase , 2 ) * torus_radius * tube_radius
def UpperCamelCase (lowercase_: float , lowercase_: float ) -> Union[str, Any]:
if length < 0 or width < 0:
raise ValueError("""area_rectangle() only accepts non-negative values""" )
return length * width
def UpperCamelCase (lowercase_: float ) -> Union[str, Any]:
if side_length < 0:
raise ValueError("""area_square() only accepts non-negative values""" )
return side_length**2
def UpperCamelCase (lowercase_: float , lowercase_: float ) -> Optional[int]:
if base < 0 or height < 0:
raise ValueError("""area_triangle() only accepts non-negative values""" )
return (base * height) / 2
def UpperCamelCase (lowercase_: float , lowercase_: float , lowercase_: float ) -> int:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("""Given three sides do not form a triangle""" )
A__ : str = (sidea + sidea + sidea) / 2
A__ : Dict = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def UpperCamelCase (lowercase_: float , lowercase_: float ) -> int:
if base < 0 or height < 0:
raise ValueError("""area_parallelogram() only accepts non-negative values""" )
return base * height
def UpperCamelCase (lowercase_: float , lowercase_: float , lowercase_: float ) -> Optional[Any]:
if basea < 0 or basea < 0 or height < 0:
raise ValueError("""area_trapezium() only accepts non-negative values""" )
return 1 / 2 * (basea + basea) * height
def UpperCamelCase (lowercase_: float ) -> List[Any]:
if radius < 0:
raise ValueError("""area_circle() only accepts non-negative values""" )
return pi * radius**2
def UpperCamelCase (lowercase_: float , lowercase_: float ) -> List[Any]:
if radius_x < 0 or radius_y < 0:
raise ValueError("""area_ellipse() only accepts non-negative values""" )
return pi * radius_x * radius_y
def UpperCamelCase (lowercase_: float , lowercase_: float ) -> Union[str, Any]:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("""area_rhombus() only accepts non-negative values""" )
return 1 / 2 * diagonal_a * diagonal_a
def UpperCamelCase (lowercase_: int , lowercase_: float ) -> Union[str, Any]:
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or sides < 3:
raise ValueError(
"""area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""" )
elif length < 0:
raise ValueError(
"""area_reg_polygon() only accepts non-negative values as \
length of a side""" )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 456 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
_lowercase = None
_lowercase = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
_lowercase = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def A (__lowerCamelCase :int , __lowerCamelCase :Optional[Any]=1 , __lowerCamelCase :List[Any]=256 ):
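    # LLaMA sizes its MLP as roughly 8/3 * hidden_dim, scaled by ffn_dim_multiplier
    # and rounded up to the nearest multiple of `multiple_of`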
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def A (__lowerCamelCase :Any ):
with open(__lowerCamelCase , """r""" ) as f:
return json.load(__lowerCamelCase )
def A (__lowerCamelCase :List[Any] , __lowerCamelCase :int ):
with open(__lowerCamelCase , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Optional[Any] , __lowerCamelCase :Tuple=True ):
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """tmp""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = read_json(os.path.join(__lowerCamelCase , """params.json""" ) )
_lowerCAmelCase = NUM_SHARDS[model_size]
_lowerCAmelCase = params["""n_layers"""]
_lowerCAmelCase = params["""n_heads"""]
_lowerCAmelCase = n_heads // num_shards
_lowerCAmelCase = params["""dim"""]
_lowerCAmelCase = dim // n_heads
_lowerCAmelCase = 10_000.0
_lowerCAmelCase = 1.0 / (base ** (torch.arange(0 , __lowerCamelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase = params["""n_kv_heads"""] # for GQA / MQA
_lowerCAmelCase = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase = n_heads
_lowerCAmelCase = n_heads_per_shard
_lowerCAmelCase = dim
# permute for sliced rotary
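    # Meta checkpoints store the rotary halves interleaved per head, while the
    # HF implementation expects two contiguous blocks, hence the
    # view/transpose/reshape below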
def permute(__lowerCamelCase :Optional[int] , __lowerCamelCase :str=n_heads , __lowerCamelCase :str=dim , __lowerCamelCase :List[Any]=dim ):
return w.view(__lowerCamelCase , dima // n_heads // 2 , 2 , __lowerCamelCase ).transpose(1 , 2 ).reshape(__lowerCamelCase , __lowerCamelCase )
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase = torch.load(os.path.join(__lowerCamelCase , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
_lowerCAmelCase = [
torch.load(os.path.join(__lowerCamelCase , f'consolidated.{i:02d}.pth' ) , map_location="""cpu""" )
for i in range(__lowerCamelCase )
]
_lowerCAmelCase = 0
_lowerCAmelCase = {"""weight_map""": {}}
for layer_i in range(__lowerCamelCase ):
_lowerCAmelCase = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) )
_lowerCAmelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
_lowerCAmelCase = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(__lowerCamelCase )] , dim=1 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__lowerCamelCase )] , dim=0 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__lowerCamelCase )] , dim=1 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__lowerCamelCase )] , dim=0 )
_lowerCAmelCase = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
_lowerCAmelCase = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
_lowerCAmelCase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__lowerCamelCase )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__lowerCamelCase )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
# Write configs
_lowerCAmelCase = {"""total_size""": param_count * 2}
write_json(__lowerCamelCase , os.path.join(__lowerCamelCase , """pytorch_model.bin.index.json""" ) )
_lowerCAmelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_lowerCAmelCase = params["""multiple_of"""] if """multiple_of""" in params else 256
_lowerCAmelCase = LlamaConfig(
hidden_size=__lowerCamelCase , intermediate_size=compute_intermediate_size(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=__lowerCamelCase , )
config.save_pretrained(__lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
_lowerCAmelCase = LlamaForCausalLM.from_pretrained(__lowerCamelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=__lowerCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__lowerCamelCase , safe_serialization=__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Union[str, Any] ):
# Initialize the tokenizer based on the `spm` model
_lowerCAmelCase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase = tokenizer_class(__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
def A ():
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=__lowerCamelCase , help="""Whether or not to save using `safetensors`.""" )
_lowerCAmelCase = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , __lowerCamelCase )
if __name__ == "__main__":
main()
| 5 | 0 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class _snake_case ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase__ ='''efficientformer'''
def __init__( self : str , snake_case : int = [3, 2, 6, 4] , snake_case : Dict = [48, 96, 224, 448] , snake_case : Union[str, Any] = [True, True, True, True] , snake_case : Union[str, Any] = 448 , snake_case : str = 32 , snake_case : Dict = 4 , snake_case : List[Any] = 7 , snake_case : Dict = 5 , snake_case : List[Any] = 8 , snake_case : str = 4 , snake_case : Optional[Any] = 0.0 , snake_case : List[str] = 16 , snake_case : Dict = 3 , snake_case : str = 3 , snake_case : List[Any] = 3 , snake_case : Any = 2 , snake_case : int = 1 , snake_case : Optional[Any] = 0.0 , snake_case : List[str] = 1 , snake_case : int = True , snake_case : Dict = True , snake_case : Tuple = 1e-5 , snake_case : Union[str, Any] = "gelu" , snake_case : Optional[int] = 0.02 , snake_case : Optional[Any] = 1e-12 , snake_case : Dict = 224 , snake_case : Dict = 1e-05 , **snake_case : str , ):
super().__init__(**_lowercase )
UpperCAmelCase_ :Union[str, Any] = hidden_act
UpperCAmelCase_ :List[str] = hidden_dropout_prob
UpperCAmelCase_ :List[Any] = hidden_sizes
UpperCAmelCase_ :int = num_hidden_layers
UpperCAmelCase_ :int = num_attention_heads
UpperCAmelCase_ :Union[str, Any] = initializer_range
UpperCAmelCase_ :List[Any] = layer_norm_eps
UpperCAmelCase_ :List[Any] = patch_size
UpperCAmelCase_ :Union[str, Any] = num_channels
UpperCAmelCase_ :Optional[int] = depths
UpperCAmelCase_ :Union[str, Any] = mlp_expansion_ratio
UpperCAmelCase_ :Any = downsamples
UpperCAmelCase_ :Union[str, Any] = dim
UpperCAmelCase_ :Any = key_dim
UpperCAmelCase_ :List[Any] = attention_ratio
UpperCAmelCase_ :List[str] = resolution
UpperCAmelCase_ :int = pool_size
UpperCAmelCase_ :Optional[Any] = downsample_patch_size
UpperCAmelCase_ :List[Any] = downsample_stride
UpperCAmelCase_ :Optional[int] = downsample_pad
UpperCAmelCase_ :Dict = drop_path_rate
UpperCAmelCase_ :Any = num_metaad_blocks
UpperCAmelCase_ :List[Any] = distillation
UpperCAmelCase_ :Optional[Any] = use_layer_scale
UpperCAmelCase_ :List[Any] = layer_scale_init_value
UpperCAmelCase_ :List[str] = image_size
UpperCAmelCase_ :Dict = batch_norm_eps
| 608 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = (DDPMScheduler,)
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowercase )
return config
def _lowercase ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def _lowercase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowercase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowercase ):
if i == len(_lowercase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowercase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowercase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowercase )
with self.assertRaises(_lowercase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowercase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=_lowercase )
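# The `_get_variance` assertions above follow from the DDPM posterior variance
# beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t. A minimal
# standalone sketch (assuming the linear beta schedule from get_scheduler_config
# above, and ignoring the scheduler's tiny numerical clamp) that recomputes the
# "fixed_small" values checked in the test:
def ddpm_fixed_small_variance(t, num_steps=1_000, beta_start=0.0001, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_steps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return (1 - alpha_bar_prev) / (1 - alphas_cumprod[t]) * betas[t]
# ddpm_fixed_small_variance(0)   -> ~0.0
# ddpm_fixed_small_variance(487) -> ~0.00979
# ddpm_fixed_small_variance(999) -> ~0.02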
| 5 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
lowercase = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowercase = {
'''allenai/led-base-16384''': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __A ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__SCREAMING_SNAKE_CASE : int = bs[:]
__SCREAMING_SNAKE_CASE : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowerCamelCase )
cs.append(2**8 + n )
n += 1
__SCREAMING_SNAKE_CASE : Tuple = [chr(__lowerCamelCase ) for n in cs]
return dict(zip(__lowerCamelCase , __lowerCamelCase ) )
def __A ( _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = set()
__SCREAMING_SNAKE_CASE : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__SCREAMING_SNAKE_CASE : List[Any] = char
return pairs
class __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : Optional[int] = VOCAB_FILES_NAMES
snake_case__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self , a__ , a__ , a__="replace" , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__=False , **a__ , ):
__SCREAMING_SNAKE_CASE : Any = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
__SCREAMING_SNAKE_CASE : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
__SCREAMING_SNAKE_CASE : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
__SCREAMING_SNAKE_CASE : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
__SCREAMING_SNAKE_CASE : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding="utf-8" ) as vocab_handle:
__SCREAMING_SNAKE_CASE : Dict = json.load(_lowercase )
__SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : Dict = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE : Optional[int] = bytes_to_unicode()
__SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding="utf-8" ) as merges_handle:
__SCREAMING_SNAKE_CASE : str = merges_handle.read().split("\n" )[1:-1]
__SCREAMING_SNAKE_CASE : Dict = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE : Union[str, Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
__SCREAMING_SNAKE_CASE : List[Any] = {}
__SCREAMING_SNAKE_CASE : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE : str = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def a_ ( self ):
return len(self.encoder )
def a_ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def a_ ( self , a__ ):
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE : List[str] = tuple(_lowercase )
__SCREAMING_SNAKE_CASE : Any = get_pairs(_lowercase )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE : Union[str, Any] = min(_lowercase , key=lambda a__ : self.bpe_ranks.get(_lowercase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = bigram
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : int = 0
while i < len(_lowercase ):
try:
__SCREAMING_SNAKE_CASE : int = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(_lowercase )
__SCREAMING_SNAKE_CASE : Optional[int] = new_word
if len(_lowercase ) == 1:
break
else:
__SCREAMING_SNAKE_CASE : List[str] = get_pairs(_lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = " ".join(_lowercase )
__SCREAMING_SNAKE_CASE : List[str] = word
return word
def a_ ( self , a__ ):
__SCREAMING_SNAKE_CASE : Tuple = []
for token in re.findall(self.pat , _lowercase ):
__SCREAMING_SNAKE_CASE : Optional[int] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(" " ) )
return bpe_tokens
def a_ ( self , a__ ):
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def a_ ( self , a__ ):
return self.decoder.get(_lowercase )
def a_ ( self , a__ ):
__SCREAMING_SNAKE_CASE : Tuple = "".join(_lowercase )
__SCREAMING_SNAKE_CASE : Any = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def a_ ( self , a__ , a__ = None ):
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + "\n" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a__ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
__SCREAMING_SNAKE_CASE : Dict = token_index
writer.write(" ".join(_lowercase ) + "\n" )
index += 1
return vocab_file, merge_file
def a_ ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
__SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def a_ ( self , a__ , a__ = None ):
__SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a_ ( self , a__ , a__=False , **a__ ):
__SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE : int = " " + text
return (text, kwargs)
def a_ ( self , a__ , a__ = None , a__ = PaddingStrategy.DO_NOT_PAD , a__ = None , a__ = None , ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = super()._pad(
encoded_inputs=_lowercase , max_length=_lowercase , padding_strategy=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , )
# Load from model defaults
if return_attention_mask is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__SCREAMING_SNAKE_CASE : Tuple = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__SCREAMING_SNAKE_CASE : str = len(encoded_inputs["global_attention_mask"] ) != len(_lowercase )
if needs_to_be_padded:
__SCREAMING_SNAKE_CASE : Any = len(_lowercase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__SCREAMING_SNAKE_CASE : str = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__SCREAMING_SNAKE_CASE : Dict = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
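# A minimal sketch (hypothetical values; upstream class name LEDTokenizer) of the
# `_pad` override above. When the encoded inputs carry a `global_attention_mask`,
# right-padding extends it with -1, because 0 already means "local attention"
# rather than "do not attend".
#
#   tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   enc = tok("a long input document")
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#   padded = tok.pad(enc, padding="max_length", max_length=32)
#   # the padded tail of padded["global_attention_mask"] is filled with -1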
| 211 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
_lowercase = logging.get_logger(__name__)
_lowercase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
_lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_SCREAMING_SNAKE_CASE )} )
_lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
_lowercase : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowercase : int = field(
default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
_lowercase : int = field(
default=6_4 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
_lowercase : int = field(
default=3_0 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
_lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
_lowercase : float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold, predict null.'''} )
_lowercase : int = field(
default=2_0 , metadata={'''help''': '''The total number of n-best predictions to generate.'''} )
_lowercase : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
_lowercase : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''train'''
_lowercase : Union[str, Any] = '''dev'''
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : SquadDataTrainingArguments
_lowercase : List[SquadFeatures]
_lowercase : Split
_lowercase : bool
def __init__( self , _lowercase , _lowercase , _lowercase = None , _lowercase = Split.train , _lowercase = False , _lowercase = None , _lowercase = "pt" , ):
"""simple docstring"""
_lowerCAmelCase = args
_lowerCAmelCase = is_language_sensitive
_lowerCAmelCase = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(_lowercase , _lowercase ):
try:
_lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
_lowerCAmelCase = mode
# Load data features from cache or dataset file
_lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
_lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not args.overwrite_cache:
_lowerCAmelCase = time.time()
_lowerCAmelCase = torch.load(_lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCAmelCase = self.old_features["""features"""]
_lowerCAmelCase = self.old_features.get("""dataset""" , _lowercase )
_lowerCAmelCase = self.old_features.get("""examples""" , _lowercase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
_lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
_lowerCAmelCase , _lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
_lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , _lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.features[i]
_lowerCAmelCase = torch.tensor(feature.input_ids , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.attention_mask , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.token_type_ids , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.cls_index , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.p_mask , dtype=torch.float )
_lowerCAmelCase = torch.tensor(feature.is_impossible , dtype=torch.float )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCAmelCase = torch.tensor(feature.start_position , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
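# A minimal usage sketch (hypothetical paths; upstream class names
# SquadDataTrainingArguments / SquadDataset) for the dataset above. The first run
# converts SQuAD examples to features and caches them next to the data; later
# runs load the cache unless overwrite_cache is set.
#
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="/path/to/squad")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_set = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)
#   batch = train_set[0]  # dict of input_ids / attention_mask / start & end positions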
| 5 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCamelCase( __UpperCamelCase : Optional[int] ):
return EnvironmentCommand()
def UpperCamelCase( __UpperCamelCase : Dict ):
return EnvironmentCommand(args.accelerate_config_file )
class __snake_case ( _SCREAMING_SNAKE_CASE ):
@staticmethod
def UpperCAmelCase__ ( A_ : Any):
lowerCAmelCase_ : List[Any] = parser.add_parser('''env''')
download_parser.set_defaults(func=_lowercase)
download_parser.add_argument(
'''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_lowercase)
def __init__( self : Dict , A_ : Optional[int] , *A_ : str):
lowerCAmelCase_ : Optional[int] = accelerate_config_file
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : Any = '''not installed'''
if is_safetensors_available():
import safetensors
lowerCAmelCase_ : Optional[Any] = safetensors.__version__
elif importlib.util.find_spec('''safetensors''') is not None:
import safetensors
lowerCAmelCase_ : Optional[Any] = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
lowerCAmelCase_ : Optional[Any] = '''not installed'''
lowerCAmelCase_ : Tuple = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
lowerCAmelCase_ : Optional[int] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_lowercase):
lowerCAmelCase_ : int = load_config_from_file(self._accelerate_config_file).to_dict()
lowerCAmelCase_ : Dict = (
'''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
if isinstance(_lowercase , _lowercase)
else F"""\t{accelerate_config}"""
)
lowerCAmelCase_ : Tuple = '''not installed'''
lowerCAmelCase_ : str = '''NA'''
if is_torch_available():
import torch
lowerCAmelCase_ : int = torch.__version__
lowerCAmelCase_ : Optional[Any] = torch.cuda.is_available()
lowerCAmelCase_ : Any = '''not installed'''
lowerCAmelCase_ : Union[str, Any] = '''NA'''
if is_tf_available():
import tensorflow as tf
lowerCAmelCase_ : Optional[Any] = tf.__version__
try:
# deprecated in v2.1
lowerCAmelCase_ : Any = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
lowerCAmelCase_ : List[str] = bool(tf.config.list_physical_devices('''GPU'''))
lowerCAmelCase_ : str = '''not installed'''
lowerCAmelCase_ : int = '''not installed'''
lowerCAmelCase_ : Any = '''not installed'''
lowerCAmelCase_ : Any = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
lowerCAmelCase_ : Dict = flax.__version__
lowerCAmelCase_ : Tuple = jax.__version__
lowerCAmelCase_ : List[Any] = jaxlib.__version__
lowerCAmelCase_ : str = jax.lib.xla_bridge.get_backend().platform
lowerCAmelCase_ : Optional[Any] = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F"""{safetensors_version}""",
'''Accelerate version''': F"""{accelerate_version}""",
'''Accelerate config''': F"""{accelerate_config_str}""",
'''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
'''Tensorflow version (GPU?)''': F"""{tf_version} ({tf_cuda_available})""",
'''Flax version (CPU?/GPU?/TPU?)''': F"""{flax_version} ({jax_backend})""",
'''Jax version''': F"""{jax_version}""",
'''JaxLib version''': F"""{jaxlib_version}""",
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''')
print(self.format_dict(_lowercase))
return info
@staticmethod
def UpperCAmelCase__ ( A_ : Dict):
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 171 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''dpr'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=0 , _lowercase="absolute" , _lowercase = 0 , **_lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowercase , **_lowercase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = projection_dim
_lowerCAmelCase = position_embedding_type
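# A minimal usage sketch of the config above (DPRConfig upstream); the values are
# illustrative, not tuned.
#
#   config = DPRConfig(projection_dim=128)
#   assert config.hidden_size == 768      # default from the signature above
#   assert config.projection_dim == 128   # 0 by default, i.e. no projection layer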
| 5 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCamelCase = """pt"""
elif is_tf_available():
UpperCamelCase = """tf"""
else:
UpperCamelCase = """jax"""
class UpperCamelCase__ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
A__ : str = PerceiverTokenizer
A__ : Optional[Any] = False
def snake_case__ ( self ) -> int:
super().setUp()
A__ = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ) -> Any:
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ) -> Dict:
A__ = []
for i in range(len(_lowercase ) ):
try:
A__ = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
A__ = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(R"^[ a-zA-Z]+$" , t[1] ) , _lowercase ) )
A__ = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowercase ) , _lowercase ) )
if max_length is not None and len(_lowercase ) > max_length:
A__ = toks[:max_length]
if min_length is not None and len(_lowercase ) < min_length and len(_lowercase ) > 0:
while len(_lowercase ) < min_length:
A__ = toks + toks
# toks_str = [t[1] for t in toks]
A__ = [t[0] for t in toks]
# Ensure consistency
A__ = tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase )
if " " not in output_txt and len(_lowercase ) > 1:
A__ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowercase )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowercase )
)
if with_prefix_space:
A__ = " " + output_txt
A__ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
return output_txt, output_ids
def snake_case__ ( self ) -> Tuple:
A__ = self.perceiver_tokenizer
A__ = "Unicode €."
A__ = tokenizer(_lowercase )
A__ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , _lowercase )
# decoding
A__ = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase , "[CLS]Unicode €.[SEP]" )
A__ = tokenizer("e è é ê ë" )
A__ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , _lowercase )
# decoding
A__ = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def snake_case__ ( self ) -> List[str]:
A__ = self.perceiver_tokenizer
A__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
A__ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
A__ = tokenizer(_lowercase , padding=_lowercase , return_tensors=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
if FRAMEWORK != "jax":
A__ = list(batch.input_ids.numpy()[0] )
else:
A__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowercase , _lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def snake_case__ ( self ) -> Tuple:
A__ = self.perceiver_tokenizer
A__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
A__ = tokenizer(_lowercase , padding=_lowercase , return_tensors=_lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , _lowercase )
self.assertIn("attention_mask" , _lowercase )
self.assertNotIn("decoder_input_ids" , _lowercase )
self.assertNotIn("decoder_attention_mask" , _lowercase )
def snake_case__ ( self ) -> List[str]:
A__ = self.perceiver_tokenizer
A__ = [
"Summary of the text.",
"Another summary.",
]
A__ = tokenizer(
text_target=_lowercase , max_length=32 , padding="max_length" , truncation=_lowercase , return_tensors=_lowercase )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def snake_case__ ( self ) -> Optional[int]:
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
A__ = tempfile.mkdtemp()
A__ = " He is very happy, UNwant\u00E9d,running"
A__ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
tokenizer.save_pretrained(_lowercase )
A__ = tokenizer.__class__.from_pretrained(_lowercase )
A__ = after_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
shutil.rmtree(_lowercase )
A__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
A__ = tempfile.mkdtemp()
A__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
A__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
A__ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
tokenizer.save_pretrained(_lowercase )
A__ = tokenizer.__class__.from_pretrained(_lowercase )
A__ = after_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
A__ = tokenizer.__class__.from_pretrained(_lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowercase )
def snake_case__ ( self ) -> str:
A__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowercase )
with open(os.path.join(_lowercase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
A__ = json.load(_lowercase )
with open(os.path.join(_lowercase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
A__ = json.load(_lowercase )
A__ = [f"""<extra_id_{i}>""" for i in range(125 )]
A__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
A__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_lowercase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowercase , _lowercase )
with open(os.path.join(_lowercase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowercase , _lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
A__ = tokenizer_class.from_pretrained(
_lowercase , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
A__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=_lowercase )]
A__ = tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def snake_case__ ( self ) -> int:
A__ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def snake_case__ ( self ) -> List[str]:
pass
def snake_case__ ( self ) -> List[Any]:
pass
def snake_case__ ( self ) -> Optional[int]:
pass
def snake_case__ ( self ) -> List[str]:
pass
def snake_case__ ( self ) -> Tuple:
A__ = self.get_tokenizers(fast=_lowercase , do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A__ = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
A__ = tokenizer.convert_tokens_to_string(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
| 104 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_lowercase = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_lowercase = """\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
"""
_lowercase = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _lowercase ( self ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _lowercase ( self , _lowercase , _lowercase , _lowercase=None , _lowercase="uniform_average" , _lowercase=True ):
"""simple docstring"""
_lowerCAmelCase = mean_squared_error(
_lowercase , _lowercase , sample_weight=_lowercase , multioutput=_lowercase , squared=_lowercase )
return {"mse": mse}
| 5 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowercase ( _SCREAMING_SNAKE_CASE ):
@staticmethod
@abstractmethod
def UpperCamelCase ( lowerCamelCase__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
raise NotImplementedError()
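# A minimal sketch (hypothetical command; upstream base name
# BaseTransformersCLICommand) of a concrete subclass of the abstract class above:
# `register_subcommand` wires an argparse sub-parser and `run` does the work.
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           hello = parser.add_parser("hello")
#           hello.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello from a custom CLI command")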
| 203 |
'''simple docstring'''
def A ():
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def A (__lowerCamelCase :List[Any] ):
_lowerCAmelCase = 1
_lowerCAmelCase = 2
while i * i <= n:
_lowerCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def A ():
return next(i for i in triangle_number_generator() if count_divisors(__lowerCamelCase ) > 500 )
if __name__ == "__main__":
print(solution())
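# A quick brute-force sanity check for the factor-based divisor count above
# (`count_divisors` at its call site): trial division must give the same answer.
def brute_force_divisors(n):
    return sum(1 for d in range(1, n + 1) if n % d == 0)
# e.g. for n = 28 = 2**2 * 7 both give (2 + 1) * (1 + 1) = 6 divisors:
# 1, 2, 4, 7, 14, 28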
| 5 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
lowerCamelCase_ : str = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowerCamelCase_ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
lowerCamelCase_ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
lowerCamelCase_ : str = "text"
lowerCamelCase_ : str = "labels"
def _snake_case ( self : Optional[int] , UpperCAmelCase : List[Any]):
if self.label_column not in features:
raise ValueError(F"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column] , _lowercase):
raise ValueError(F"Column {self.label_column} is not a ClassLabel.")
SCREAMING_SNAKE_CASE_ :Union[str, Any] = copy.deepcopy(self)
SCREAMING_SNAKE_CASE_ :List[str] = self.label_schema.copy()
SCREAMING_SNAKE_CASE_ :List[str] = features[self.label_column]
SCREAMING_SNAKE_CASE_ :str = label_schema
return task_template
@property
def _snake_case ( self : Union[str, Any]):
return {
self.text_column: "text",
self.label_column: "labels",
}
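# A minimal usage sketch (hypothetical schema; upstream class name
# TextClassification) of the template above: align_with_features copies the
# template and fills in the concrete ClassLabel from a dataset's features.
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)
#   # task.label_schema["labels"] is now the two-class ClassLabel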
| 631 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase = logging.get_logger(__name__)
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 5 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ : List[Any] =logging.get_logger(__name__)
def a__ ( A__, A__, A__ ):
SCREAMING_SNAKE_CASE_ : Tuple = UniSpeechSatForSequenceClassification.from_pretrained(__lowerCamelCase, config=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = downstream_dict['projector.weight']
SCREAMING_SNAKE_CASE_ : List[Any] = downstream_dict['projector.bias']
SCREAMING_SNAKE_CASE_ : int = downstream_dict['model.post_net.linear.weight']
SCREAMING_SNAKE_CASE_ : List[Any] = downstream_dict['model.post_net.linear.bias']
return model
def a__ ( A__, A__, A__ ):
SCREAMING_SNAKE_CASE_ : Any = UniSpeechSatForAudioFrameClassification.from_pretrained(__lowerCamelCase, config=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = downstream_dict['model.linear.weight']
SCREAMING_SNAKE_CASE_ : List[str] = downstream_dict['model.linear.bias']
return model
def a__ ( A__, A__, A__ ):
SCREAMING_SNAKE_CASE_ : List[str] = UniSpeechSatForXVector.from_pretrained(__lowerCamelCase, config=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Any = downstream_dict['connector.weight']
SCREAMING_SNAKE_CASE_ : Any = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE_ : Any = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
SCREAMING_SNAKE_CASE_ : List[Any] = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
SCREAMING_SNAKE_CASE_ : Tuple = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
SCREAMING_SNAKE_CASE_ : str = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
SCREAMING_SNAKE_CASE_ : List[str] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
SCREAMING_SNAKE_CASE_ : List[Any] = downstream_dict['objective.W']
return model
@torch.no_grad()
def a__ ( A__, A__, A__, A__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.load(__lowerCamelCase, map_location='cpu' )
SCREAMING_SNAKE_CASE_ : List[Any] = checkpoint['Downstream']
SCREAMING_SNAKE_CASE_ : List[str] = UniSpeechSatConfig.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Any = WavaVecaFeatureExtractor.from_pretrained(
__lowerCamelCase, return_attention_mask=__lowerCamelCase, do_normalize=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
SCREAMING_SNAKE_CASE_ : Dict = convert_classification(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
elif arch.endswith('ForAudioFrameClassification' ):
SCREAMING_SNAKE_CASE_ : List[str] = convert_diarization(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
elif arch.endswith('ForXVector' ):
SCREAMING_SNAKE_CASE_ : List[str] = convert_xvector(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE_ : Any = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(__lowerCamelCase )
hf_model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : Tuple =argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
lowerCAmelCase__ : Any =parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
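# A minimal invocation sketch (assumed filename and paths) for the converter
# above; the architecture listed in --config_path decides which convert_* branch
# runs (sequence classification, diarization, or x-vector).
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model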
| 101 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 5 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __magic_name__ :
def __init__( self : Tuple ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : List[Any]=1_3 ,__SCREAMING_SNAKE_CASE : List[Any]=7 ,__SCREAMING_SNAKE_CASE : int=True ,__SCREAMING_SNAKE_CASE : Dict=True ,__SCREAMING_SNAKE_CASE : List[str]=True ,__SCREAMING_SNAKE_CASE : List[Any]=True ,__SCREAMING_SNAKE_CASE : int=9_9 ,__SCREAMING_SNAKE_CASE : int=3_2 ,__SCREAMING_SNAKE_CASE : str=2 ,__SCREAMING_SNAKE_CASE : Tuple=4 ,__SCREAMING_SNAKE_CASE : Tuple=3_7 ,__SCREAMING_SNAKE_CASE : Tuple="gelu" ,__SCREAMING_SNAKE_CASE : int=0.1 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.1 ,__SCREAMING_SNAKE_CASE : Any=5_1_2 ,__SCREAMING_SNAKE_CASE : Optional[int]=1_6 ,__SCREAMING_SNAKE_CASE : List[str]=2 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 ,__SCREAMING_SNAKE_CASE : List[str]=3 ,__SCREAMING_SNAKE_CASE : Optional[Any]=4 ,__SCREAMING_SNAKE_CASE : Dict=None ,):
UpperCAmelCase = parent
UpperCAmelCase = 1_3
UpperCAmelCase = 7
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = 9_9
UpperCAmelCase = 3_8_4
UpperCAmelCase = 2
UpperCAmelCase = 4
UpperCAmelCase = 3_7
UpperCAmelCase = "gelu"
UpperCAmelCase = 0.1
UpperCAmelCase = 0.1
UpperCAmelCase = 5_1_2
UpperCAmelCase = 1_6
UpperCAmelCase = 2
UpperCAmelCase = 0.02
UpperCAmelCase = 3
UpperCAmelCase = 4
UpperCAmelCase = 1_2_8
UpperCAmelCase = 2
UpperCAmelCase = 9
UpperCAmelCase = 1
UpperCAmelCase = None
def _UpperCAmelCase ( self : Tuple ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = ConvBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,return_dict=_lowercase ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 333 |
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
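# Illustrative invocation (the script's filename is an assumption, not part of the original):
#   python3 search_google.py "hugging face transformers"
# fetches the result page, saves it to project1a.html, and opens up to five result
# links in the default browser.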
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
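# Illustration (assuming this file is transformers/models/vit_mae/__init__.py; my note,
# not original code): `from transformers.models.vit_mae import ViTMAEModel` resolves the
# symbol through the _LazyModule above, so the heavy torch-backed modeling file is only
# imported on first attribute access and plain package import stays cheap.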
| 500 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
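# Illustrative way to run this (the token value and filename are assumptions):
#   GITHUB_TOKEN=ghp_xxx python stale_bot.py
# Scripts like this are typically wired to a scheduled CI job (e.g. a GitHub Actions cron)
# rather than run by hand.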
| 5 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
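# Minimal usage sketch (standard transformers pattern; illustrative, not part of this file):
#   from transformers import RobertaConfig, RobertaModel
#   configuration = RobertaConfig()        # defaults mirror the roberta-base architecture
#   model = RobertaModel(configuration)    # randomly initialised weights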
| 517 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 5 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
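# Minimal usage sketch (standard transformers pattern; illustrative):
#   from transformers import Data2VecVisionConfig, Data2VecVisionModel
#   config = Data2VecVisionConfig()        # facebook/data2vec-vision-base style defaults
#   model = Data2VecVisionModel(config)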
| 456 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 0 |
"""simple docstring"""
import functools
def a ( __snake_case : list[int], __snake_case : list[int] ):
'''simple docstring'''
if not isinstance(__lowerCamelCase, __lowerCamelCase ) or not all(isinstance(__lowerCamelCase, __lowerCamelCase ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(__lowerCamelCase ) != 3 or not all(isinstance(__lowerCamelCase, __lowerCamelCase ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(__lowerCamelCase ) == 0:
return 0
if min(__lowerCamelCase ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(__lowerCamelCase ) >= 366:
raise ValueError('''All days elements should be less than 366''' )
UpperCAmelCase_ :List[str] = set(__lowerCamelCase )
@functools.cache
def dynamic_programming(__snake_case : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ), costs[1] + dynamic_programming(index + 7 ), costs[2] + dynamic_programming(index + 30 ), )
return dynamic_programming(1 )
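# Worked example (LeetCode 983's first sample, added for illustration):
# days = [1, 4, 6, 7, 8, 20], costs = [2, 7, 15] -> 11:
# a 7-day pass bought on day 1 covers days 1-7, then two 1-day passes
# cover days 8 and 20, for 7 + 2 + 2 = 11.
# assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11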
if __name__ == "__main__":
import doctest
doctest.testmod()
| 608 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
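# Hedged usage sketch (the CLAP checkpoint name is the one used in the transformers docs,
# but treat this snippet as illustrative rather than part of this file):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])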
| 5 | 0 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
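# Worked example (illustrative): a 480x640 (height x width) input with output_size=(384, 384),
# keep_aspect_ratio=True and multiple=32 gives scale_height=0.8 and scale_width=0.6; fitting
# the height (the scale closer to 1) rescales both sides by 0.8, so the function returns
# (384, 512), both of which are multiples of 32.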
class __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : List[str] = ['''pixel_values''']
def __init__( self , a__ = True , a__ = None , a__ = PILImageResampling.BILINEAR , a__ = False , a__ = 1 , a__ = True , a__ = 1 / 255 , a__ = True , a__ = None , a__ = None , **a__ , ):
super().__init__(**_lowercase )
__SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {"height": 384, "width": 384}
__SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(_lowercase )
__SCREAMING_SNAKE_CASE : Any = do_resize
__SCREAMING_SNAKE_CASE : Any = size
__SCREAMING_SNAKE_CASE : List[str] = keep_aspect_ratio
__SCREAMING_SNAKE_CASE : int = ensure_multiple_of
__SCREAMING_SNAKE_CASE : List[str] = resample
__SCREAMING_SNAKE_CASE : Tuple = do_rescale
__SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
__SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
__SCREAMING_SNAKE_CASE : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__SCREAMING_SNAKE_CASE : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a_ ( self , a__ , a__ , a__ = False , a__ = 1 , a__ = PILImageResampling.BICUBIC , a__ = None , **a__ , ):
__SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
__SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
_lowercase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def a_ ( self , a__ , a__ , a__ = None , **a__ , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def a_ ( self , a__ , a__ , a__ , a__ = None , **a__ , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def a_ ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
__SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE : Tuple = size if size is not None else self.size
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(_lowercase )
__SCREAMING_SNAKE_CASE : int = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__SCREAMING_SNAKE_CASE : Union[str, Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__SCREAMING_SNAKE_CASE : Optional[Any] = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE : List[str] = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE : int = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE : List[str] = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE : int = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE : Dict = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE : Optional[int] = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
__SCREAMING_SNAKE_CASE : str = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
__SCREAMING_SNAKE_CASE : List[Any] = {"pixel_values": images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def a_ ( self , a__ , a__ = None ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(_lowercase ):
__SCREAMING_SNAKE_CASE : Tuple = target_sizes.numpy()
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for idx in range(len(_lowercase ) ):
__SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=_lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
__SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
__SCREAMING_SNAKE_CASE : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 211 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_lowercase = logging.get_logger(__name__)
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[str] = ['''input_values''', '''padding_mask''']
def __init__( self , _lowercase = 1 , _lowercase = 24_000 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , **_lowercase , ):
"""simple docstring"""
super().__init__(feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , **_lowercase )
_lowerCAmelCase = chunk_length_s
_lowerCAmelCase = overlap
@property
def _lowercase ( self ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _lowercase ( self ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , _lowercase , _lowercase = None , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
_lowerCAmelCase = True
_lowerCAmelCase = bool(
isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
_lowerCAmelCase = [np.asarray(_lowercase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_lowercase , np.ndarray ):
_lowerCAmelCase = np.asarray(_lowercase , dtype=np.floataa )
elif isinstance(_lowercase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
_lowerCAmelCase = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCAmelCase = [np.asarray(_lowercase ).T]
# verify inputs are valid
for idx, example in enumerate(_lowercase ):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
_lowerCAmelCase = None
_lowerCAmelCase = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_lowerCAmelCase = min(array.shape[0] for array in raw_audio )
_lowerCAmelCase = int(np.floor(max_length / self.chunk_stride ) )
_lowerCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_lowerCAmelCase = max(array.shape[0] for array in raw_audio )
_lowerCAmelCase = int(np.ceil(max_length / self.chunk_stride ) )
_lowerCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
_lowerCAmelCase = """max_length"""
else:
_lowerCAmelCase = input_values
# normal padding on batch
if padded_inputs is None:
_lowerCAmelCase = self.pad(
_lowercase , max_length=_lowercase , truncation=_lowercase , padding=_lowercase , return_attention_mask=_lowercase , )
if padding:
_lowerCAmelCase = padded_inputs.pop("""attention_mask""" )
_lowerCAmelCase = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
_lowerCAmelCase = example[..., None]
input_values.append(example.T )
_lowerCAmelCase = input_values
if return_tensors is not None:
_lowerCAmelCase = padded_inputs.convert_to_tensors(_lowercase )
return padded_inputs
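# Hedged usage sketch (facebook/encodec_24khz is a real public checkpoint, but this
# snippet is illustrative and not part of the original file):
#   from transformers import EncodecFeatureExtractor
#   feature_extractor = EncodecFeatureExtractor.from_pretrained("facebook/encodec_24khz")
#   inputs = feature_extractor(raw_audio, sampling_rate=24_000, return_tensors="pt")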
| 5 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
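# Sanity note (from the Project Euler 85 statement, added for illustration): a 3x2 grid
# contains T(3) * T(2) = 6 * 3 = 18 rectangles, where T(n) = n * (n + 1) / 2 is the n-th
# triangle number; solution() searches for the grid area whose count is nearest the target.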
if __name__ == "__main__":
print(F'''{solution() = }''')
| 171 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 5 | 0 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
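# Hedged usage sketch (names illustrative): given a PyTorch module and an already
# constructed Flax model that exposes `init_weights`, conversion is a single call:
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)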
| 104 |
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least "
                f"{pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
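# Hedged usage sketch (the checkpoint names are real public ControlNets, but the snippet
# itself is illustrative):
#   from diffusers import ControlNetModel
#   nets = [
#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth"),
#   ]
#   multi_controlnet = MultiControlNetModel(nets)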
| 203 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 5 | 0 |