"""Utility to bump the `transformers` version everywhere it is referenced for a release or post-release."""
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the regex associated with `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is referenced (skipping the examples for a patch release)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the `main` docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the main `__init__.py`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps, bumping to the next dev version."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)

    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
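# Illustration of the substitution performed by `update_version_in_file`, applied
# to an in-memory string instead of a file (the sample text is hypothetical; the
# pattern and replacement are the "init" entry of REPLACE_PATTERNS above):
#
#     re_pattern, replace = REPLACE_PATTERNS["init"]
#     re_pattern.sub(replace.replace("VERSION", "4.27.0"), '__version__ = "4.27.0.dev0"\n')
#     # -> '__version__ = "4.27.0"\n'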
"""Streamlit demo app: long-form question answering on ELI5 with dense/sparse Wikipedia retrieval."""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # note: `support_list` is read from module scope, set in the main block below
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by a sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>",
                    unsafe_allow_html=True,
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, "  \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
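# To try the app locally (script name assumed; the precomputed index files and
# model checkpoints referenced above must already exist on disk):
#
#     streamlit run lfqa.py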
"""Fine-tune masked language models with whole word masking (e.g. for Chinese) on a text file or a dataset."""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid them being removed by the trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
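# Example invocation (file names and hyper-parameters are illustrative; the flags
# all correspond to the dataclass fields and TrainingArguments above):
#
#     python run_mlm_wwm.py \
#         --model_name_or_path bert-base-chinese \
#         --train_file ./train.txt \
#         --train_ref_file ./train_ref.txt \
#         --do_train \
#         --output_dir ./output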
"""Processor class for Pix2Struct."""
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Pix2StructProcessor(ProcessorMixin):
    r"""
    Wraps a `Pix2StructImageProcessor` and a T5 tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepare image(s) and/or text for the model: images go through the image processor,
        text through the tokenizer (as decoder inputs when both are present).
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            # rename the tokenizer outputs to decoder features
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the T5 tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the T5 tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
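# Minimal usage sketch (assumes the published class in `transformers`; the
# checkpoint name and the blank test image are illustrative):
if __name__ == "__main__":
    from PIL import Image
    from transformers import Pix2StructProcessor

    processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
    image = Image.new("RGB", (256, 256), color="white")  # stand-in for a real screenshot
    inputs = processor(images=image, return_tensors="pt")
    print(inputs.keys())  # flattened image patches plus their attention mask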
"""Evaluate a fully parenthesized arithmetic expression with Dijkstra's two-stack algorithm."""
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate `equation`, a fully parenthesized expression over single-digit operands.

    >>> dijkstras_two_stack_algorithm("(5 + 3)")
    8
    >>> dijkstras_two_stack_algorithm("((9 - (2 + 9)) + (8 - 1))")
    5
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing parenthesis, pop one operator and two
            # operands, apply the operator, and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
"""Project Euler problem 188: the last digits of the hyperexponentiation base↑↑height."""


def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute base ** exponent % modulo_value by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation of `base` by `height`."""
    # calculate base↑↑height by right-associative repeated modular exponentiation
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUs (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
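# Typical launch command (the file name is illustrative; any machine configured
# with `accelerate config` will work, and the flags match the parser above):
#
#     accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16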
def permute(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations of `nums`, built recursively.

    >>> permute([1, 2, 3])
    [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    """
    Return all permutations of `nums`, built by in-place backtracking.

    >>> permute2([1, 2, 3])
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    """

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
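# Both implementations should produce the same set of permutations (the order
# differs); a quick self-contained check on a small input:
assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))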
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
A_ : Tuple = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : List[Any] = 'tapas'
def __init__( self : Union[str, Any] , __UpperCAmelCase : Optional[int]=3_0_5_2_2 , __UpperCAmelCase : Optional[int]=7_6_8 , __UpperCAmelCase : Union[str, Any]=1_2 , __UpperCAmelCase : str=1_2 , __UpperCAmelCase : Optional[int]=3_0_7_2 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : List[str]=1_0_2_4 , __UpperCAmelCase : Union[str, Any]=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : str=1e-12 , __UpperCAmelCase : int=0 , __UpperCAmelCase : Optional[Any]=10.0 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Optional[int]=1.0 , __UpperCAmelCase : Dict=None , __UpperCAmelCase : List[Any]=1.0 , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : str=1.0 , __UpperCAmelCase : Optional[Any]=1.0 , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : int="ratio" , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : str=None , __UpperCAmelCase : str=6_4 , __UpperCAmelCase : Optional[Any]=3_2 , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Any=True , __UpperCAmelCase : Any=False , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : List[Any]=None , **__UpperCAmelCase : List[Any] , ) -> Dict:
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_sizes
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
# Fine-tuning task hyperparameters
SCREAMING_SNAKE_CASE__ = positive_label_weight
SCREAMING_SNAKE_CASE__ = num_aggregation_labels
SCREAMING_SNAKE_CASE__ = aggregation_loss_weight
SCREAMING_SNAKE_CASE__ = use_answer_as_supervision
SCREAMING_SNAKE_CASE__ = answer_loss_importance
SCREAMING_SNAKE_CASE__ = use_normalized_answer_loss
SCREAMING_SNAKE_CASE__ = huber_loss_delta
SCREAMING_SNAKE_CASE__ = temperature
SCREAMING_SNAKE_CASE__ = aggregation_temperature
SCREAMING_SNAKE_CASE__ = use_gumbel_for_cells
SCREAMING_SNAKE_CASE__ = use_gumbel_for_aggregation
SCREAMING_SNAKE_CASE__ = average_approximation_function
SCREAMING_SNAKE_CASE__ = cell_selection_preference
SCREAMING_SNAKE_CASE__ = answer_loss_cutoff
SCREAMING_SNAKE_CASE__ = max_num_rows
SCREAMING_SNAKE_CASE__ = max_num_columns
SCREAMING_SNAKE_CASE__ = average_logits_per_cell
SCREAMING_SNAKE_CASE__ = select_one_column
SCREAMING_SNAKE_CASE__ = allow_empty_column_selection
SCREAMING_SNAKE_CASE__ = init_cell_selection_weights_to_zero
SCREAMING_SNAKE_CASE__ = reset_position_index_per_cell
SCREAMING_SNAKE_CASE__ = disable_per_token_loss
# Aggregation hyperparameters
SCREAMING_SNAKE_CASE__ = aggregation_labels
SCREAMING_SNAKE_CASE__ = no_aggregation_label_index
if isinstance(self.aggregation_labels , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = {int(__UpperCAmelCase ): v for k, v in aggregation_labels.items()}
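# Usage sketch (assumes the published class in `transformers`; the overrides are
# illustrative aggregation-style fine-tuning settings, not values from this file):
if __name__ == "__main__":
    from transformers import TapasConfig

    agg_config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
    print(agg_config.num_aggregation_labels, agg_config.use_answer_as_supervision)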
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
def __init__( self : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict=1_3 , __UpperCAmelCase : Optional[Any]=3_0 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : List[Any]=3_2 , __UpperCAmelCase : int=5 , __UpperCAmelCase : Union[str, Any]=4 , __UpperCAmelCase : Union[str, Any]=3_7 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : Optional[Any]=1_0 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Any=0.6 , __UpperCAmelCase : Dict=None , ) -> str:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = mask_ratio
SCREAMING_SNAKE_CASE__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = ViTMAEModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ = ViTMAEForPreTraining(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = (self.image_size // self.patch_size) ** 2
SCREAMING_SNAKE_CASE__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = ViTMAEForPreTraining(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
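        # With num_channels == 1 each patch decodes to patch_size ** 2 values
        # (2 ** 2 = 4 with the defaults above), hence the reduced last dimension.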
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (A__ ,A__ ,unittest.TestCase ):
lowerCamelCase__ : Optional[int] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCamelCase__ : str = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : List[str] = False
lowerCamelCase__ : str = False
lowerCamelCase__ : List[Any] = False
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = ViTMAEModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] ) -> List[Any]:
# make masks reproducible
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
SCREAMING_SNAKE_CASE__ = torch.from_numpy(__UpperCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
SCREAMING_SNAKE_CASE__ = pt_noise
super().check_pt_tf_models(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
    def test_save_load( self : List[Any] ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2 )] = 0
                max_diff = np.amax(np.abs(out_2 - out_1 ) )
                self.assertLessEqual(max_diff , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = ViTMAEModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
SCREAMING_SNAKE_CASE__ = ViTMAEConfig()
SCREAMING_SNAKE_CASE__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase , noise=torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase ) )
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCAmelCase ) , atol=1e-4 ) )
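# A minimal, self-contained sketch (not part of the test suite above) of the
# determinism property those tests rely on: passing an explicit `noise` tensor fixes
# ViTMAE's random patch mask, so repeated forward passes agree. The randomly
# initialized model below is an illustrative assumption (requires torch installed).
if __name__ == "__main__":
    demo_config = ViTMAEConfig()
    demo_model = ViTMAEForPreTraining(demo_config).eval()
    demo_num_patches = (demo_config.image_size // demo_config.patch_size) ** 2
    demo_noise = torch.from_numpy(np.random.uniform(size=(1, demo_num_patches)))
    demo_pixels = torch.randn(1, demo_config.num_channels, demo_config.image_size, demo_config.image_size)
    with torch.no_grad():
        first = demo_model(demo_pixels, noise=demo_noise).logits
        second = demo_model(demo_pixels, noise=demo_noise).logits
    assert torch.allclose(first, second)  # same noise -> same mask -> same logits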
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase : str = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : Optional[Any] = """RegNetConfig"""
# Base docstring
lowerCAmelCase : int = """facebook/regnet-y-040"""
lowerCAmelCase : Optional[Any] = [1, 10_88, 7, 7]
# Image classification docstring
lowerCAmelCase : Any = """facebook/regnet-y-040"""
lowerCAmelCase : Optional[Any] = """tabby, tabby cat"""
lowerCAmelCase : Tuple = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = 3 , snake_case__ = 1 , snake_case__ = 1 , snake_case__ = "relu" , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_lowerCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_lowerCAmelCase : List[Any] = tf.keras.layers.ConvaD(
filters=snake_case__ , kernel_size=snake_case__ , strides=snake_case__ , padding='VALID' , groups=snake_case__ , use_bias=snake_case__ , name='convolution' , )
_lowerCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
_lowerCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = self.convolution(self.padding(snake_case__ ) )
_lowerCAmelCase : Union[str, Any] = self.normalization(snake_case__ )
_lowerCAmelCase : int = self.activation(snake_case__ )
return hidden_state
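# Worked example for TFRegNetConvLayer's padding scheme above: with kernel_size=3 the
# input is zero-padded by 3 // 2 = 1 pixel per side, so the VALID convolution matches
# PyTorch-style "same" padding (output spatial size = ceil(input_size / stride)).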
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : str = config.num_channels
_lowerCAmelCase : List[Any] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = shape_list(snake_case__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_lowerCAmelCase : List[Any] = tf.transpose(snake_case__ , perm=(0, 2, 3, 1) )
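        # e.g. an NCHW batch of shape (8, 3, 224, 224) becomes (8, 224, 224, 3) after
        # this transpose, which is the layout tf.keras convolutions expect on CPU.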
_lowerCAmelCase : Tuple = self.embedder(snake_case__ )
return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = 2 , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = tf.keras.layers.ConvaD(
filters=snake_case__ , kernel_size=1 , strides=snake_case__ , use_bias=snake_case__ , name='convolution' )
_lowerCAmelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def a ( self , snake_case__ , snake_case__ = False ):
'''simple docstring'''
return self.normalization(self.convolution(snake_case__ ) , training=snake_case__ )
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' )
_lowerCAmelCase : str = [
tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = self.pooler(snake_case__ )
for layer_module in self.attention:
_lowerCAmelCase : Tuple = layer_module(snake_case__ )
_lowerCAmelCase : Optional[Any] = hidden_state * pooled
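        # Shape walk-through of this squeeze-and-excite block: (B, H, W, C) is pooled
        # to (B, 1, 1, C), passed through the two 1x1 convs, and the sigmoid output
        # then broadcasts back over H and W when rescaling the input feature map.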
return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Optional[int] = in_channels != out_channels or stride != 1
_lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width )
_lowerCAmelCase : Optional[Any] = (
TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_lowerCAmelCase : Any = [
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.2' ),
]
_lowerCAmelCase : List[str] = ACTaFN[config.hidden_act]
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = hidden_state
for layer_module in self.layers:
_lowerCAmelCase : int = layer_module(snake_case__ )
_lowerCAmelCase : int = self.shortcut(snake_case__ )
hidden_state += residual
_lowerCAmelCase : Tuple = self.activation(snake_case__ )
return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : List[str] = in_channels != out_channels or stride != 1
_lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width )
_lowerCAmelCase : Optional[Any] = (
TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
_lowerCAmelCase : Tuple = [
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.3' ),
]
_lowerCAmelCase : Tuple = ACTaFN[config.hidden_act]
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = hidden_state
for layer_module in self.layers:
_lowerCAmelCase : List[Any] = layer_module(snake_case__ )
_lowerCAmelCase : Tuple = self.shortcut(snake_case__ )
hidden_state += residual
_lowerCAmelCase : str = self.activation(snake_case__ )
return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 2 , snake_case__ = 2 , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Dict = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
_lowerCAmelCase : Optional[int] = [
# downsampling is done in the first layer with stride of 2
layer(snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , name='layers.0' ),
*[layer(snake_case__ , snake_case__ , snake_case__ , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def a ( self , snake_case__ ):
'''simple docstring'''
for layer_module in self.layers:
_lowerCAmelCase : int = layer_module(snake_case__ )
return hidden_state
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : str = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
_lowerCAmelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ , name=F'stages.{i+1}' ) )
def a ( self , snake_case__ , snake_case__ = False , snake_case__ = True ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase : str = hidden_states + (hidden_state,)
_lowerCAmelCase : List[str] = stage_module(snake_case__ )
if output_hidden_states:
_lowerCAmelCase : Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ )
@keras_serializable
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
__magic_name__ = RegNetConfig
def __init__( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = config
_lowerCAmelCase : Union[str, Any] = TFRegNetEmbeddings(snake_case__ , name='embedder' )
_lowerCAmelCase : Optional[int] = TFRegNetEncoder(snake_case__ , name='encoder' )
_lowerCAmelCase : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' )
@unpack_inputs
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
'''simple docstring'''
_lowerCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : int = self.embedder(snake_case__ , training=snake_case__ )
_lowerCAmelCase : List[str] = self.encoder(
snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ )
_lowerCAmelCase : List[Any] = encoder_outputs[0]
_lowerCAmelCase : Tuple = self.pooler(snake_case__ )
        # Change to NCHW output format to have uniformity across the modules
_lowerCAmelCase : Optional[int] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) )
_lowerCAmelCase : Optional[Any] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_lowerCAmelCase : Union[str, Any] = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = RegNetConfig
__magic_name__ = "regnet"
__magic_name__ = "pixel_values"
@property
def a ( self ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCAmelCase : List[Any] = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : Dict = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
_lowerCAmelCase : List[str] = TFRegNetMainLayer(snake_case__ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__=False , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : str = self.regnet(
pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = config.num_labels
_lowerCAmelCase : Optional[Any] = TFRegNetMainLayer(snake_case__ , name='regnet' )
# classification head
_lowerCAmelCase : Optional[int] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__=False , ):
'''simple docstring'''
_lowerCAmelCase : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Dict = self.regnet(
snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ )
_lowerCAmelCase : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase : List[Any] = self.classifier[0](snake_case__ )
_lowerCAmelCase : Tuple = self.classifier[1](snake_case__ )
_lowerCAmelCase : int = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ )
if not return_dict:
_lowerCAmelCase : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
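# Minimal usage sketch for the classes above. It assumes the public
# `TFRegNetForImageClassification` export that transformers builds from this file and
# the "facebook/regnet-y-040" checkpoint named in the docstrings; illustrative only.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    demo_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    demo_model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    demo_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    demo_inputs = demo_processor(images=demo_image, return_tensors="tf")
    demo_logits = demo_model(**demo_inputs).logits  # shape (1, 1000)
    print(demo_model.config.id2label[int(tf.argmax(demo_logits, axis=-1)[0])])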
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case__ , 'width_multiplier' ) )
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=64 , snake_case__=2 , snake_case__=3 , snake_case__="swish" , snake_case__=3 , snake_case__=32 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , snake_case__=0.25 , snake_case__=0.0 , snake_case__=0.0 , ):
'''simple docstring'''
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : List[Any] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[Any] = conv_kernel_size
_lowerCAmelCase : Optional[Any] = output_stride
_lowerCAmelCase : List[Any] = classifier_dropout_prob
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : List[str] = is_training
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : str = scope
_lowerCAmelCase : Any = width_multiplier
_lowerCAmelCase : Union[str, Any] = ffn_dropout
_lowerCAmelCase : Optional[int] = attn_dropout
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Dict = None
if self.use_labels:
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def a ( self ):
'''simple docstring'''
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = MobileViTVaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCAmelCase : str = model(snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : List[Any] = MobileViTVaForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCAmelCase : Dict = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_lowerCAmelCase : Any = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs
_lowerCAmelCase : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__magic_name__ = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = MobileViTVaModelTester(self )
_lowerCAmelCase : Dict = MobileViTVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def a ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def a ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def a ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def a ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def a ( self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : str = model_class(snake_case__ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : int = [*signature.parameters.keys()]
_lowerCAmelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def a ( self ):
'''simple docstring'''
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
_lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
_lowerCAmelCase : List[str] = outputs.hidden_states
_lowerCAmelCase : List[str] = 5
self.assertEqual(len(snake_case__ ) , snake_case__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowerCAmelCase : List[Any] = 2
for i in range(len(snake_case__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
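            # Worked example with the tester defaults (image_size=64, 5 hidden states):
            # the spatial sizes checked are 32, 16, 8, 4 and 2, leaving divisor = 64
            # after the loop, so output_stride must equal 64 // 2 = 32.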
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
@slow
def a ( self ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Dict = MobileViTVaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a ( self ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
snake_case__ )
_lowerCAmelCase : str = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**snake_case__ )
# verify the logits
_lowerCAmelCase : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
_lowerCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowerCAmelCase : Any = model.to(snake_case__ )
_lowerCAmelCase : int = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowerCAmelCase : Optional[int] = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : int = model(**snake_case__ )
_lowerCAmelCase : Dict = outputs.logits
# verify the logits
_lowerCAmelCase : str = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , snake_case__ )
_lowerCAmelCase : Any = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowerCAmelCase : List[Any] = model.to(snake_case__ )
_lowerCAmelCase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowerCAmelCase : Tuple = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Any = model(**snake_case__ )
_lowerCAmelCase : Optional[Any] = outputs.logits.detach().cpu()
_lowerCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(50, 60)] )
_lowerCAmelCase : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , snake_case__ )
_lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
_lowerCAmelCase : Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , snake_case__ )
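        # Note: without `target_sizes` the post-processing stays at the logits' native
        # resolution (32x32 here); with `target_sizes` each map is first resized to the
        # requested (height, width) before the per-pixel class is taken.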
lowerCamelCase : str = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
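# Minimal sketch of the canonical entry point re-exported above; the model, optimizer
# and dataloader construction is omitted and assumed to exist already:
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         optimizer.zero_grad()
#         loss = model(**batch).loss
#         accelerator.backward(loss)  # replaces loss.backward() in mixed/distributed setups
#         optimizer.step()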
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1_280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width is lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main():
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image, new_annos, path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit("." , 1 )[0]
        file_root = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(F"{file_root}.jpg" , new_image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj )
        with open(F"{file_root}.txt" , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
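# The annotation loop above converts corner boxes back to YOLO format: corners
# (xmin, ymin, xmax, ymax) = (0.2, 0.3, 0.6, 0.7) become center (0.4, 0.5) with
# width 0.4 and height 0.4, all normalized to [0, 1].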
def get_dataset(label_dir , img_dir ):
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , "*.txt" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F"{label_name}.jpg" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n" ).split(" " )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
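# Label files are expected in YOLO format, one object per line:
# "<class_id> <x_center> <y_center> <width> <height>", normalized to [0, 1];
# get_dataset converts the centers to corner coordinates for the mosaic math below.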
def update_image_and_anno(all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ):
    '''simple docstring'''
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cv2.imread(path )
        if i == 0:  # top-left
            img = cv2.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cv2.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cv2.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cv2.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than filter_scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
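# Coordinate remapping example: with scale_x = 0.5 the vertical seam sits at x = 0.5,
# so a top-right tile box with x in [0, 1] lands in [0.5, 1.0] via
# new_x = scale_x + x * (1 - scale_x); the other quadrants follow the same pattern.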
def random_chars(number_char ):
    '''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase : List[Any] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_2_8,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 5_0,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 1_0,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 1_0,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class _UpperCamelCase ( unittest.TestCase):
'''simple docstring'''
@classmethod
def a__ ( cls ) -> List[str]:
lowercase : Dict = TOKEN
HfFolder.save_token(_lowercase )
@classmethod
def a__ ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def a__ ( self ) -> Dict:
lowercase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("test-config" , use_auth_token=self._token )
lowercase : Tuple = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowercase , repo_id="test-config" , push_to_hub=_lowercase , use_auth_token=self._token )
lowercase : int = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
def a__ ( self ) -> Any:
lowercase : Optional[int] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
lowercase : List[str] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_lowercase , repo_id="valid_org/test-config-org" , push_to_hub=_lowercase , use_auth_token=self._token )
lowercase : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
def a__ ( self ) -> List[Any]:
CustomConfig.register_for_auto_class()
lowercase : Union[str, Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
lowercase : str = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=_lowercase )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 4_2 )
class _UpperCamelCase ( unittest.TestCase):
'''simple docstring'''
def a__ ( self ) -> Dict:
lowercase : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowercase : Union[str, Any] = c.n_embd + 1 # int
lowercase : List[str] = c.resid_pdrop + 1.0 # float
lowercase : int = not c.scale_attn_weights # bool
lowercase : str = c.summary_type + "foo" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(_lowercase , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(_lowercase , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(_lowercase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(_lowercase , c.summary_type , "mismatch for key: summary_type" )
def a__ ( self ) -> Optional[int]:
lowercase : Union[str, Any] = PretrainedConfig()
lowercase : Dict = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
_lowercase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
lowercase : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(_lowercase , _lowercase )]
if len(_lowercase ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F''' {', '.join(_lowercase )}.''' )
def a__ ( self ) -> Optional[int]:
with self.assertRaises(_lowercase ):
# config is in subfolder, the following should not work without specifying the subfolder
lowercase : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
lowercase : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(_lowercase )
def a__ ( self ) -> Dict:
# A mock response for an HTTP head request to emulate server down
lowercase : List[Any] = mock.Mock()
lowercase : Tuple = 5_0_0
lowercase : str = {}
lowercase : Union[str, Any] = HTTPError
lowercase : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowercase : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_lowercase ) as mock_head:
lowercase : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def a__ ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
lowercase : Union[str, Any] = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def a__ ( self ) -> List[str]:
lowercase : Optional[Any] = AutoConfig.from_pretrained("bert-base-cased" )
lowercase : Union[str, Any] = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_lowercase )
lowercase : Dict = 2
json.dump(configuration.to_dict() , open(os.path.join(_lowercase , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowercase : Union[str, Any] = AutoConfig.from_pretrained(_lowercase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowercase : Union[str, Any] = ["config.42.0.0.json"]
lowercase : Optional[int] = 7_6_8
configuration.save_pretrained(_lowercase )
shutil.move(os.path.join(_lowercase , "config.4.0.0.json" ) , os.path.join(_lowercase , "config.42.0.0.json" ) )
lowercase : Optional[Any] = AutoConfig.from_pretrained(_lowercase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
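        # Under the hood, `from_pretrained` scans for `config.X.Y.Z.json` files and
        # picks the one with the highest version not exceeding the installed
        # `transformers` version, falling back to the plain `config.json`.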
def a__ ( self ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowercase : Tuple = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
lowercase : Optional[Any] = "v4.0.0"
lowercase , lowercase : Dict = new_transformers.models.auto.AutoConfig.from_pretrained(
_lowercase , return_unused_kwargs=_lowercase )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_lowercase , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowercase : Optional[Any] = "v3.0.0"
lowercase : int = old_transformers.models.auto.AutoConfig.from_pretrained(_lowercase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
'''simple docstring'''
def odd_even_sort(input_list ) -> list:
    is_sorted = False
    while is_sorted is False:  # keep looping until a full pass makes no swaps
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
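# Example: odd_even_sort([5, 1, 4, 2, 8]) returns [1, 2, 4, 5, 8]. Within a single
# pass, every even-index (and then every odd-index) comparison touches disjoint pairs,
# which is what makes odd-even transposition sort easy to run in parallel.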
if __name__ == "__main__":
print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
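# Usage sketch (assuming the reconstructed class above; the values shown are
# simply the defaults defined in __init__):
#
#     >>> config = ConvNextV2Config()
#     >>> config.stage_names
#     ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#     >>> config.hidden_sizes
#     [96, 192, 384, 768]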
| 411 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""CLIPFeatureExtractor"""]
lowerCAmelCase_ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
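# Sketch of how the lazy structure above behaves at runtime (illustrative, not
# part of the original file): the module object is swapped for a _LazyModule,
# so each submodule is imported only when one of its names is first accessed.
#
#     >>> from transformers.models import clip
#     >>> clip.CLIPTokenizer  # first attribute access imports tokenization_clip
#     <class 'transformers.models.clip.tokenization_clip.CLIPTokenizer'>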
| 411 | 1 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 715 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Union[str, Any]="binary" , lowerCamelCase_ :Dict=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = fa_score(
lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ , pos_label=lowerCamelCase_ , average=lowerCamelCase_ , sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
| 18 | 0 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 428 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
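# A few illustrative checks against the pattern above (hypothetical numbers,
# verified by hand against the regex, not from a recorded run):
#
#     >>> is_sri_lankan_phone_number("+94773283048")
#     True
#     >>> is_sri_lankan_phone_number("0721234567")
#     True
#     >>> is_sri_lankan_phone_number("0731234567")  # 73 is not an accepted prefix here
#     False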
if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 433 | 0 |
"""simple docstring"""
import cmath
import math
def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """Calculate the apparent power in a single-phase AC circuit."""
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
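# Worked examples (hypothetical values, not from the original file):
#
#     >>> apparent_power(100, 5, 0, 0)
#     (500+0j)
#     >>> apparent_power(100, 5, 90, 0)
#     (3.061616997868383e-14+500j)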
if __name__ == "__main__":
import doctest
doctest.testmod()
| 536 | """simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
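# Usage sketch for get_activation itself (illustrative; only the four names
# exercised above are assumed to exist in the registry):
#
#     >>> act = get_activation("gelu")
#     >>> isinstance(act, nn.GELU)
#     True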
| 536 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
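# Usage sketch for the configuration class above (assuming the reconstruction;
# values follow directly from the defaults and attribute_map):
#
#     >>> config = BlenderbotSmallConfig()
#     >>> config.hidden_size  # resolved via attribute_map to d_model
#     512
#     >>> config.num_attention_heads  # resolved via attribute_map to encoder_attention_heads
#     16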
| 29 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """An `argparse.ArgumentParser` subclass that uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
| 645 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
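# Inference-side sketch of the processor exercised above (the checkpoint name
# matches the one used in these tests; `image` is a hypothetical PIL image):
#
#     >>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     >>> inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
#     >>> sorted(inputs.keys())
#     ['attention_mask', 'input_ids', 'pixel_values']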
| 365 |
from __future__ import annotations
graph: dict[str, list[str]] = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
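# Worked example for the adjacency list above (derived by tracing the code,
# not from a recorded run): BFS from "G" reaches D through C -> A -> B, so
#
#     >>> g = Graph(graph, "G")
#     >>> g.breath_first_search()
#     >>> g.shortest_path("D")
#     'G->C->A->B->D'
#
# and shortest_path("Foo") raises ValueError since "Foo" is never visited.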
| 365 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase : Union[str, Any] = tokenizer.encode(snake_case )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = "This is a test"
UpperCAmelCase : Union[str, Any] = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
UpperCAmelCase : int = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase : Tuple = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase : Optional[int] = DebertaVaTokenizer(snake_case , keep_accents=snake_case )
UpperCAmelCase : Any = DebertaVaTokenizerFast(snake_case , keep_accents=snake_case )
UpperCAmelCase : List[Any] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : Tuple = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : Any = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(snake_case , snake_case )
# fmt: off
UpperCAmelCase : Any = "I was born in 92000, and this is falsé."
UpperCAmelCase : Optional[int] = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
UpperCAmelCase : Any = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : str = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : Optional[Any] = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
UpperCAmelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = DebertaVaTokenizer(snake_case )
UpperCAmelCase : Tuple = tokenizer.encode("sequence builders" )
UpperCAmelCase : Optional[Any] = tokenizer.encode("multi-sequence build" )
UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(snake_case )
UpperCAmelCase : str = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case , )
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 679 |
'''Overwrite expected test outputs with corrected values.

Reads a file of `file;class_name;test_name;correct_line` records and rewrites
the matching assertion line in each test file.
'''
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
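
# Usage sketch (file names are illustrative). Each line of the "correct" file
# must hold a `file;class_name;test_name;correct_line` record, matching the
# split(";") in main() above:
#
#   python overwrite_expected_outputs.py --correct_filename corrected.txt \
#       --fail_filename failures.txt
#
# When --fail_filename is given, only the tests listed there (formatted as
# `file::class::test`) are rewritten.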
| 679 | 1 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
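
# A minimal evaluation sketch (added here, not part of the original script):
# score the degree-4 model on the held-out split computed above. `r2_score`
# is the standard sklearn metric; everything else reuses names defined above.
from sklearn.metrics import r2_score

y_pred = pol_reg.predict(poly_reg.fit_transform(X_test))
print(f"R^2 on the held-out 20% split: {r2_score(y_test, y_pred):.3f}")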
| 241 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
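# Note on the pattern above: binding a _LazyModule into sys.modules makes
# importing this package cheap -- the torch-backed classes listed in
# _import_structure are only actually imported on first attribute access.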
| 241 | 1 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the dataset to JSON in batches; returns the number of bytes written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
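
# Usage sketch (hypothetical paths): this writer is what backs the public
# `Dataset.to_json` API, e.g.
#
#   from datasets import Dataset
#   Dataset.from_dict({"a": [1, 2, 3]}).to_json("out.jsonl", lines=True)
#
# which streams the Arrow table to newline-delimited JSON in batches.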
| 566 |
'''A two-ended recursive linear search.'''


def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Return the index of ``key`` in ``list_data``, or -1 if it is not present.

    The search walks inward from both ends of the list, one step per
    recursive call, so it runs in linear time.
    """
    right = right or len(list_data) - 1

    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
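
# Usage sketch (illustrative values only):
#
#   search([1, 2, 4, 8, 16], 8)   # -> 3
#   search([1, 2, 4, 8, 16], 5)   # -> -1
#
# Because ``right`` defaults to 0 and is replaced via ``or``, passing right=0
# explicitly also means "search up to the end of the list".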
| 566 | 1 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
_lowerCAmelCase :Dict = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
_lowerCAmelCase :Dict = """main"""
# Default branch name
_lowerCAmelCase :Union[str, Any] = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
_lowerCAmelCase :Union[str, Any] = """aaaaaaa"""
# This commit does not exist, so we should 404.
_lowerCAmelCase :int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
_lowerCAmelCase :List[Any] = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def __lowerCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def __lowerCAmelCase ( ) -> int:
'''simple docstring'''
print('Bonjour!' )
yield
print('Au revoir!' )
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> str:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def _UpperCamelCase ( self , lowercase__ ) -> str:
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def _UpperCamelCase ( self , lowercase__ ) -> Any:
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def _UpperCamelCase ( self , lowercase__ ) -> Union[str, Any]:
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def _UpperCamelCase ( self ) -> Optional[int]:
self.assertEqual(find_labels(lowercase__ ) , ['labels'] )
self.assertEqual(find_labels(lowercase__ ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(lowercase__ ) , ['start_positions', 'end_positions'] )
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
pass
self.assertEqual(find_labels(lowercase__ ) , ['labels'] )
@require_tf
def _UpperCamelCase ( self ) -> str:
self.assertEqual(find_labels(lowercase__ ) , ['labels'] )
self.assertEqual(find_labels(lowercase__ ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(lowercase__ ) , ['start_positions', 'end_positions'] )
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
pass
self.assertEqual(find_labels(lowercase__ ) , ['labels'] )
@require_flax
def _UpperCamelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(lowercase__ ) , [] )
self.assertEqual(find_labels(lowercase__ ) , [] )
self.assertEqual(find_labels(lowercase__ ) , [] )
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
pass
self.assertEqual(find_labels(lowercase__ ) , [] )
| 701 | '''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=99 , lowercase__=64 , lowercase__=5 , lowercase__=4 , lowercase__=64 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ) -> int:
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : Any = use_input_mask
SCREAMING_SNAKE_CASE : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def _UpperCamelCase ( self ) -> Union[str, Any]:
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : str = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self ) -> Tuple:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
SCREAMING_SNAKE_CASE : List[str] = MPNetModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : str = MPNetForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(
lowercase__ , attention_mask=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MPNetForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_choices
SCREAMING_SNAKE_CASE : Any = MPNetForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
lowercase__ , attention_mask=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = MPNetForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : str = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Optional[int] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : int = True
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : int = MPNetModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
def _UpperCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowercase__ )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase__ )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase__ )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase__ )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase__ )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Tuple = MPNetModel.from_pretrained('microsoft/mpnet-base' )
SCREAMING_SNAKE_CASE : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowercase__ )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase__ )
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1E-4 ) )
| 179 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case ):
lowercase = params
lowercase = np.array(snake_case )
lowercase = np.array([len(snake_case ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , snake_case ):
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
return len(self.lengths )
def SCREAMING_SNAKE_CASE__ ( self ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.params.max_model_input_size
lowercase = self.lengths > max_len
logger.info(F'''Splitting {sum(snake_case )} too long sequences.''' )
def divide_chunks(snake_case , snake_case ):
return [l[i : i + n] for i in range(0 , len(snake_case ) , snake_case )]
lowercase = []
lowercase = []
if self.params.mlm:
lowercase , lowercase = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
lowercase , lowercase = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowercase = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
lowercase = np.insert(snake_case , 0 , snake_case )
if sub_s[-1] != sep_id:
lowercase = np.insert(snake_case , len(snake_case ) , snake_case )
assert len(snake_case ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(snake_case )
new_tok_ids.extend(snake_case )
new_lengths.extend([len(snake_case ) for l in sub_seqs] )
lowercase = np.array(snake_case )
lowercase = np.array(snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = len(self )
lowercase = self.lengths > 11
lowercase = self.token_ids[indices]
lowercase = self.lengths[indices]
lowercase = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def SCREAMING_SNAKE_CASE__ ( self ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowercase = self.params.special_tok_ids['unk_token']
lowercase = len(self )
lowercase = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowercase = (unk_occs / self.lengths) < 0.5
lowercase = self.token_ids[indices]
lowercase = self.lengths[indices]
lowercase = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def SCREAMING_SNAKE_CASE__ ( self ):
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = [t[0] for t in batch]
lowercase = [t[1] for t in batch]
assert len(snake_case ) == len(snake_case )
# Max for paddings
lowercase = max(snake_case )
# Pad token ids
if self.params.mlm:
lowercase = self.params.special_tok_ids['pad_token']
else:
lowercase = self.params.special_tok_ids['unk_token']
lowercase = [list(t.astype(snake_case ) ) + [pad_idx] * (max_seq_len_ - len(snake_case )) for t in token_ids]
assert len(tk_ ) == len(snake_case )
assert all(len(snake_case ) == max_seq_len_ for t in tk_ )
lowercase = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowercase = torch.tensor(snake_case ) # (bs)
return tk_t, lg_t
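
# Usage sketch (assumed objects): the final method above is the batch collator
# (named `batch_sequences` upstream); it is meant to be passed to a DataLoader:
#
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#
# Each iteration then yields a (padded token_ids, lengths) tensor pair.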
| 84 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __A (unittest.TestCase ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=4_00 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=0.9 , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , ):
__UpperCAmelCase : Tuple = size if size is not None else {"shortest_edge": 30}
__UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else {"height": 30, "width": 30}
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = min_resolution
__UpperCAmelCase : Union[str, Any] = max_resolution
__UpperCAmelCase : Optional[int] = do_resize_and_center_crop
__UpperCAmelCase : Any = size
__UpperCAmelCase : Dict = crop_pct
__UpperCAmelCase : Optional[Any] = crop_size
__UpperCAmelCase : Optional[int] = do_normalize
__UpperCAmelCase : Union[str, Any] = image_mean
__UpperCAmelCase : List[str] = image_std
def _snake_case ( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __A (__magic_name__ , unittest.TestCase ):
snake_case :Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def _snake_case ( self ):
__UpperCAmelCase : str = PoolFormerImageProcessingTester(self )
@property
def _snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "size" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "crop_pct" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "image_std" ) )
def _snake_case ( self ):
__UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
__UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# Initialize image_processing
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__UpperCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[Any] = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _snake_case ( self ):
# Initialize image_processing
__UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
__UpperCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _snake_case ( self ):
# Initialize image_processing
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
__UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : int = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 168 | 0 |
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """Pick a random element of ``lst`` to partition around."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest element (1-indexed) of ``lst`` using quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
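
# Usage sketch (illustrative values). Note that the partition keeps only
# elements strictly smaller or larger than the pivot, so duplicates of the
# pivot are dropped -- the function assumes distinct elements.
#
#   kth_number([2, 1, 3, 4, 5], 3)   # -> 3
#   kth_number([10, 2, 7], 1)        # -> 2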
| 720 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
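
# Usage sketch: the defaults above give the base RoFormer shape; illustrative only.
#
#   cfg = RoFormerConfig()
#   (cfg.hidden_size, cfg.max_position_embeddings, cfg.rotary_value)  # -> (768, 1536, False)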
| 372 | 0 |
"""simple docstring"""
from PIL import Image
def UpperCAmelCase ( A : Image , A : int ):
'''simple docstring'''
_UpperCAmelCase = (259 * (level + 255)) / (255 * (259 - level))
def contrast(A : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(A )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
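
# Worked example of the factor (plain arithmetic, added for illustration):
# for level=170, factor = (259 * 425) / (255 * 89) ≈ 4.85, so a pixel value of
# 200 maps to int(128 + 4.85 * (200 - 128)) ≈ 477, which Pillow clamps to 255.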
| 573 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=30 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=10 , snake_case=0.02 , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , )
return config, pixel_values
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = FlaxViTModel(config=snake_case )
_UpperCAmelCase = model(snake_case )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase = (self.patch_size, self.patch_size)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Tuple:
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = FlaxViTForImageClassification(config=snake_case )
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FlaxViTForImageClassification(snake_case )
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowerCamelCase_ ( self ) -> None:
_UpperCAmelCase = FlaxViTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(snake_case , snake_case )
_UpperCAmelCase = model_class(snake_case )
@jax.jit
def model_jitted(snake_case , **snake_case ):
return model(pixel_values=snake_case , **snake_case )
with self.subTest('JIT Enabled' ):
_UpperCAmelCase = model_jitted(**snake_case ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_UpperCAmelCase = model_jitted(**snake_case ).to_tuple()
self.assertEqual(len(snake_case ) , len(snake_case ) )
for jitted_output, output in zip(snake_case , snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained('google/vit-base-patch16-224' )
_UpperCAmelCase = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case )
| 573 | 1 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __lowerCAmelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Any=False ) -> Optional[int]:
try:
_UpperCamelCase : List[Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCamelCase : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCamelCase : List[str] = strtobool(__lowerCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no." )
return _value
_SCREAMING_SNAKE_CASE = parse_flag_from_env("""RUN_SLOW""", default=False)
_SCREAMING_SNAKE_CASE = parse_flag_from_env("""RUN_REMOTE""", default=False)
_SCREAMING_SNAKE_CASE = parse_flag_from_env("""RUN_LOCAL""", default=True)
_SCREAMING_SNAKE_CASE = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
# Decorator and parameter names below are restored; the dump collapsed every
# function and argument to `__lowerCAmelCase`, which both shadowed the
# definitions and left the bodies referencing undefined names.
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model: str):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
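# Hedged sketch of the same gating pattern with a hypothetical requirement
# (an environment variable instead of an importable package); not part of the
# upstream module:
def require_env_var(name):
    def _decorator(test_case):
        if name not in os.environ:
            return unittest.skip(f"test requires env var {name}")(test_case)
        return test_case

    return _decorator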
def for_all_test_methods(*decorators):
    # Applies every given decorator to each test_* method of a class.
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    # Name restored: the offline simulator below raises it by this name.
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-1_6):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout.")
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    # Function name is a reconstruction from the upstream datasets test utils.
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
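# Minimal runnable sketch (hedged addition) of what the two context managers
# above assert: pa.total_allocated_bytes() moves when Arrow buffers appear.
def _arrow_memory_sketch():
    before = pa.total_allocated_bytes()
    table = pa.table({"col": list(range(10_000))})  # allocates Arrow buffers
    assert pa.total_allocated_bytes() >= before
    return table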
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    # Name restored: the async helpers below reference _RunOutput directly.
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result


def pytest_xdist_worker_id() -> int:
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(R"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port() -> int:
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
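# Quick illustration (hedged addition, not part of the upstream module):
# worker "gw3" maps to id 3, so the torch.distributed master port becomes
# 29500 + 3, keeping concurrent pytest-xdist workers from colliding.
with patch.dict(os.environ, {"PYTEST_XDIST_WORKER": "gw3"}):
    assert get_torch_dist_unique_port() == 29503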
| 239 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    # Class, base-class and attribute names restored; the dump assigned each
    # argument to a throwaway local instead of the matching self attribute.
    model_type = "vit_mae"

    def __init__(self, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=2_24, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=5_12, decoder_num_hidden_layers=8, decoder_intermediate_size=20_48, mask_ratio=0.75, norm_pix_loss=False, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
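# Hedged usage sketch: with the restored defaults above, ViTMAEConfig
# describes a 12-layer, 768-dim encoder that masks 75% of patches.
def _vit_mae_config_sketch():
    config = ViTMAEConfig()
    assert config.hidden_size == 768 and config.num_hidden_layers == 12
    assert config.mask_ratio == 0.75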
| 239 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
webbrowser.open(link)
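# Illustration (hedged addition) of the parse_qs fallback above: it pulls the
# "url" parameter out of a redirect-style query string. The CSS class names
# ("yuRUbf", "kCrYT") are Google-generated and can change at any time.
assert parse_qs("url=https://example.com&sa=U")["url"][0] == "https://example.com"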
| 105 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    # Both functions were named `_A` in the dump, so the second shadowed the
    # first; names restored so the integral can reference its integrand.
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
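# Sanity check (hedged addition): Gamma(n) = (n - 1)! for positive integers,
# so the integral should give approximately 24 for n = 5.
assert abs(gamma(5.0) - 24.0) < 1e-6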
| 563 | 0 |
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    # Parameter names restored from the recursive calls in the body; the dump
    # also dropped the `data[index] = ...` assignment target.
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
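# Cross-check sketch (hedged addition): the recursion above enumerates the
# same C(5, 3) = 10 subsets that itertools.combinations yields.
from itertools import combinations

assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10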
| 717 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class VanConfig(PretrainedConfig):
    # Class, base-class and attribute names restored from the archive map above.
    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1E-6, layer_scale_init_value=1E-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
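# Hedged usage sketch: the restored defaults above describe VAN's four-stage
# backbone.
def _van_config_sketch():
    config = VanConfig()
    assert config.hidden_sizes == [64, 128, 320, 512] and config.depths == [3, 3, 12, 3]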
| 348 | 0 |
def actual_power(a: int, b: int) -> int:
    # Function and parameter names restored; both defs were named `__A` and the
    # bodies referenced the undefined names a and b.
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        # int(b / 2) truncates toward zero, so actual_power also terminates
        # for negative exponents here.
        return 1 / actual_power(a, b)
    return actual_power(a, b)
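# Note (hedged addition): actual_power calls itself twice per level, so it is
# O(b) despite halving the exponent. Caching the half result restores the
# intended O(log b), shown here for non-negative exponents:
def fast_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half * (a if b % 2 else 1)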
if __name__ == "__main__":
print(power(-2, -3))
| 612 |
def binary_multiply(a: int, b: int) -> int:
    # Names follow the upstream TheAlgorithms module (a reconstruction); both
    # defs were named `__a`, so the second shadowed the first.
    # Russian-peasant scheme: add shifted copies of a for each set bit of b.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    # Same scheme with every addition reduced mod `modulus`.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
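# Quick checks (hedged addition): 6 * 7 == 42, and (6 * 7) % 5 == 2.
assert binary_multiply(6, 7) == 42
assert binary_mod_multiply(6, 7, 5) == 2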
| 303 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_snake_case : List[str] = logging.get_logger(__name__)
class A ( __lowercase ):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        # The dump assigned the ModuleList to a throwaway local; forward()
        # below reads self.nets, so the attribute assignment is restored.
        self.nets = nn.ModuleList(controlnets)
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True, ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample, timestep=timestep, encoder_hidden_states=encoder_hidden_states, controlnet_cond=image, conditioning_scale=scale, class_labels=class_labels, timestep_cond=timestep_cond, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, guess_mode=guess_mode, return_dict=return_dict, )
            # merge samples: element-wise sum of each net's residuals
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None, ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
@classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.")
        return cls(controlnets)
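# Hedged usage sketch: `A` above is this dump's name for diffusers'
# MultiControlNetModel; forward() sums the residuals of each net, weighted by
# its conditioning scale. The checkpoint ids below are real community repos
# but are illustrative here, and loading them downloads weights:
# canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
# pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
# multi = A([canny, pose])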
| 706 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    # Function, parameter and local names restored from the call sites below;
    # the dump collapsed every assignment target to `_a`.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
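# Example of the name mapping performed above: the PyTorch key
# "encoder.layer.0.attention.self.query.weight" becomes the TF variable
# "bert/encoder/layer_0/attention/self/query/kernel", and the tensor is
# transposed because "attention.self.query" is in tensors_to_transpose.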
| 377 | 0 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class lowerCAmelCase__ ( __lowercase ):
a__ : List[str] = """detr"""
a__ : Union[str, Any] = ["""past_key_values"""]
a__ : Tuple = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_00 , SCREAMING_SNAKE_CASE__ : List[Any]=6 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : List[Any]=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Tuple="relu" , SCREAMING_SNAKE_CASE__ : Dict=2_56 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Any="sine" , SCREAMING_SNAKE_CASE__ : List[Any]="resnet50" , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , **SCREAMING_SNAKE_CASE__ : str , ) -> Dict:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__lowerCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = backbone_config.get('''model_type''' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
# set timm attributes to None
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None, None, None
__lowerCamelCase = use_timm_backbone
__lowerCamelCase = backbone_config
__lowerCamelCase = num_channels
__lowerCamelCase = num_queries
__lowerCamelCase = d_model
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = encoder_layers
__lowerCamelCase = auxiliary_loss
__lowerCamelCase = position_embedding_type
__lowerCamelCase = backbone
__lowerCamelCase = use_pretrained_backbone
__lowerCamelCase = dilation
# Hungarian matcher
__lowerCamelCase = class_cost
__lowerCamelCase = bbox_cost
__lowerCamelCase = giou_cost
# Loss coefficients
__lowerCamelCase = mask_loss_coefficient
__lowerCamelCase = dice_loss_coefficient
__lowerCamelCase = bbox_loss_coefficient
__lowerCamelCase = giou_loss_coefficient
__lowerCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __A ( self : List[Any] ) -> int:
return self.encoder_attention_heads
@property
def __A ( self : Optional[int] ) -> int:
return self.d_model
@classmethod
def __A ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
return cls(backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __A ( self : str ) -> Dict[str, any]:
__lowerCamelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
class lowerCAmelCase__ ( __lowercase ):
a__ : Union[str, Any] = version.parse("""1.11""" )
@property
def __A ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self : Any ) -> float:
return 1e-5
@property
def __A ( self : Any ) -> int:
return 12
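# Hedged usage sketch: the two classes above correspond to transformers'
# DetrConfig and its ONNX export config. With the defaults in __init__:
def _detr_config_sketch():
    from transformers import DetrConfig

    cfg = DetrConfig()
    assert cfg.d_model == 256 and cfg.num_queries == 100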
| 298 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( __lowercase , __lowercase ):
@register_to_config
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None ) -> List[str]:
super().__init__()
__lowerCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase = torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
__lowerCamelCase = None
__lowerCamelCase = torch.nn.Parameter(SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase__ ( __lowercase ):
a__ : VQModel
a__ : CLIPTextModel
a__ : CLIPTokenizer
a__ : TransformeraDModel
a__ : LearnedClassifierFreeSamplingEmbeddings
a__ : VQDiffusionScheduler
def __init__( self : int , SCREAMING_SNAKE_CASE__ : VQModel , SCREAMING_SNAKE_CASE__ : CLIPTextModel , SCREAMING_SNAKE_CASE__ : CLIPTokenizer , SCREAMING_SNAKE_CASE__ : TransformeraDModel , SCREAMING_SNAKE_CASE__ : VQDiffusionScheduler , SCREAMING_SNAKE_CASE__ : LearnedClassifierFreeSamplingEmbeddings , ) -> Any:
super().__init__()
self.register_modules(
vqvae=SCREAMING_SNAKE_CASE__ , transformer=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , learned_classifier_free_sampling_embeddings=SCREAMING_SNAKE_CASE__ , )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Tuple:
__lowerCamelCase = len(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=SCREAMING_SNAKE_CASE__ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(SCREAMING_SNAKE_CASE__ , 1 , 1 )
else:
__lowerCamelCase = [''''''] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=SCREAMING_SNAKE_CASE__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , SCREAMING_SNAKE_CASE__ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : int , SCREAMING_SNAKE_CASE__ : Union[str, List[str]] , SCREAMING_SNAKE_CASE__ : int = 1_00 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE__ : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE__ )}''' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE__ )}.''' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ ).sample
if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(SCREAMING_SNAKE_CASE__ , dim=1 , keepdim=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.truncate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(SCREAMING_SNAKE_CASE__ , shape=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.vqvae.decode(SCREAMING_SNAKE_CASE__ , force_not_quantize=SCREAMING_SNAKE_CASE__ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : float ) -> torch.FloatTensor:
__lowerCamelCase , __lowerCamelCase = torch.sort(SCREAMING_SNAKE_CASE__ , 1 , descending=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.exp(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
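# Standalone sketch (hedged addition) of the truncate() idea above: keep the
# most probable classes whose cumulative probability stays below the
# truncation rate, always retain the argmax, and mask out the rest.
def _truncation_sketch(truncation_rate: float = 0.9):
    probs = torch.tensor([0.5, 0.3, 0.15, 0.05])
    sorted_p, indices = probs.sort(descending=True)
    keep = sorted_p.cumsum(0) < truncation_rate  # True for the retained head
    keep[0] = True  # never drop the most probable class
    return keep[indices.argsort(0)]  # scatter the mask back to original order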
| 298 | 1 |
def binary_exponentiation(a, n, mod):
    # Name restored from the call sites below; computes a**n % mod in O(log n).
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 7_0_1
a = 1_0_0_0_0_0_0_0_0_0
b = 1_0
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
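# Why b ** (p - 2) acts as division above (hedged addition): Fermat's little
# theorem gives b ** (p - 1) == 1 (mod p) for prime p with gcd(b, p) == 1, so
# b ** (p - 2) is the modular inverse of b.
assert (b * binary_exponentiation(b, p - 2, p)) % p == 1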
| 73 |
def get_max_min(min_val=10, max_val=1000, option=True):
    # Helper name is a reconstruction; get_avg, guess_the_number and main
    # below are fixed by their call sites in this file.
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1, number_2):
    return int((number_1 + number_2) / 2)


def guess_the_number(lower, higher, to_guess):
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main():
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
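# The midpoint loop in guess_the_number is plain bisection, so it needs at
# most about log2(higher - lower) guesses: roughly 10 for a 1..1000 range.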
| 73 | 1 |
import torch
def main():
    # Name restored: the __main__ guard below calls main().
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'Successfully ran on {num_gpus} GPUs')
if __name__ == "__main__":
main()
| 81 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    # Function and variable names restored: every def in the dump was named
    # `lowercase__`, while the driver below calls them by their real names.
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 68 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=2 , lowerCamelCase=99 , lowerCamelCase=0 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=5_12 , lowerCamelCase=2 , lowerCamelCase=0.0_2 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase="last" , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=0 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_input_lengths
snake_case__ = use_token_type_ids
snake_case__ = use_labels
snake_case__ = gelu_activation
snake_case__ = sinusoidal_embeddings
snake_case__ = causal
snake_case__ = asm
snake_case__ = n_langs
snake_case__ = vocab_size
snake_case__ = n_special
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = num_labels
snake_case__ = num_choices
snake_case__ = summary_type
snake_case__ = use_proj
snake_case__ = scope
snake_case__ = bos_token_id
def A_ ( self ):
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ = None
if self.use_input_lengths:
snake_case__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case__ = None
if self.use_token_type_ids:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case__ = None
snake_case__ = None
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ = ids_tensor([self.batch_size] , 2 ).float()
snake_case__ = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def A_ ( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
snake_case__ = XLMModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ = model(lowerCamelCase , lengths=lowerCamelCase , langs=lowerCamelCase )
snake_case__ = model(lowerCamelCase , langs=lowerCamelCase )
snake_case__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
snake_case__ = XLMWithLMHeadModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ = model(lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
snake_case__ = XLMForQuestionAnsweringSimple(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ = model(lowerCamelCase )
snake_case__ = model(lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
snake_case__ = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
snake_case__ = XLMForQuestionAnswering(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ = model(lowerCamelCase )
snake_case__ = model(
lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , cls_index=lowerCamelCase , is_impossible=lowerCamelCase , p_mask=lowerCamelCase , )
snake_case__ = model(
lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , cls_index=lowerCamelCase , is_impossible=lowerCamelCase , )
((snake_case__ ) , ) = result_with_labels.to_tuple()
snake_case__ = model(lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
((snake_case__ ) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
snake_case__ = XLMForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ = model(lowerCamelCase )
snake_case__ = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
snake_case__ = self.num_labels
snake_case__ = XLMForTokenClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
snake_case__ = self.num_choices
snake_case__ = XLMForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self ):
snake_case__ = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) = config_and_inputs
snake_case__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
_A : Optional[int] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_A : List[Any] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_A : Tuple = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        # "the president" repeated ten times
        expected_output_ids = [14, 447] * 10
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
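

# Quick sanity check (a minimal sketch added here, not part of the original
# solution): the four known non-trivial digit-cancelling fractions should all
# be detected, and solution() should reduce their product to denominator 100.
def _self_test() -> None:
    for n, d in [(16, 64), (19, 95), (26, 65), (49, 98)]:
        assert is_digit_cancelling(n, d), f"{n}/{d} should be digit-cancelling"
    assert solution() == 100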
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__UpperCamelCase : List[str] = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = None
lowerCAmelCase = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
lowerCAmelCase = os.path.abspath('examples' )
for item in os.listdir(_snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCAmelCase = os.path.join(_snake_case , _snake_case )
if os.path.isfile(_snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=_snake_case , feature_script=_snake_case , tested_section='main()' if parser_only else 'training_function()' , ):
lowerCAmelCase = compare_against_test(
os.path.join(_snake_case , _snake_case ) , _snake_case , _snake_case , _snake_case )
lowerCAmelCase = '\n'.join(_snake_case )
if special_strings is not None:
for string in special_strings:
lowerCAmelCase = diff.replace(_snake_case , '' )
self.assertEqual(_snake_case , '' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.one_complete_example('complete_nlp_example.py' , _snake_case )
self.one_complete_example('complete_nlp_example.py' , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
lowerCAmelCase = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , _snake_case , _snake_case , _snake_case )
self.one_complete_example('complete_cv_example.py' , _snake_case , _snake_case , _snake_case )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class a ( a__ ):
snake_case__ = False
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
super().setUpClass()
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
lowerCAmelCase = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
lowerCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=_snake_case )
self.assertNotIn('epoch 0:' , _snake_case )
self.assertIn('epoch 1:' , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=_snake_case )
if torch.cuda.is_available():
lowerCAmelCase = torch.cuda.device_count()
else:
lowerCAmelCase = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , _snake_case )
self.assertIn('epoch 1:' , _snake_case )
else:
self.assertIn('epoch 0:' , _snake_case )
self.assertIn('epoch 1:' , _snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=_snake_case )
lowerCAmelCase = re.findall('({.+})' , _snake_case )
lowerCAmelCase = [r for r in results if 'accuracy' in r][-1]
lowerCAmelCase = ast.literal_eval(_snake_case )
self.assertGreaterEqual(results['accuracy'] , 0.75 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
lowerCAmelCase = F'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_snake_case , 'tracking' ) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase (__snake_case , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[Any] = LongformerTokenizer
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : List[str] = LongformerTokenizerFast
_SCREAMING_SNAKE_CASE : Dict = True
def __snake_case ( self :Dict ) ->Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowercase : Tuple = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowercase : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase : Dict = {"""unk_token""": """<unk>"""}
lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__magic_name__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
def __snake_case ( self :List[Any] , **__magic_name__ :Any ) ->Tuple:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
def __snake_case ( self :Optional[Any] , **__magic_name__ :Optional[Any] ) ->Dict:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
def __snake_case ( self :Tuple , __magic_name__ :Dict ) ->str:
lowercase : List[str] = """lower newer"""
lowercase : Any = """lower newer"""
return input_text, output_text
def __snake_case ( self :Tuple ) ->Union[str, Any]:
lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase : List[str] = """lower newer"""
lowercase : Tuple = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowercase : Optional[int] = tokenizer.tokenize(__magic_name__ ) # , add_prefix_space=True)
self.assertListEqual(__magic_name__ , __magic_name__ )
lowercase : str = tokens + [tokenizer.unk_token]
lowercase : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def __snake_case ( self :Any ) ->str:
lowercase : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__magic_name__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__magic_name__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def __snake_case ( self :Tuple ) ->Union[str, Any]:
lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
lowercase : Tuple = tokenizer.encode("""sequence builders""" , add_special_tokens=__magic_name__ )
lowercase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__magic_name__ )
lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : List[Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : int = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __snake_case ( self :Optional[Any] ) ->int:
lowercase : Optional[int] = self.get_tokenizer()
lowercase : Tuple = """Encode this sequence."""
lowercase : Dict = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
lowercase : List[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__magic_name__ , __magic_name__ )
lowercase : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__magic_name__ , __magic_name__ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
lowercase : Union[str, Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
lowercase : List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__magic_name__ , __magic_name__ )
# Testing spaces after special tokens
lowercase : Any = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ )} ) # mask token has a left space
lowercase : Any = tokenizer.convert_tokens_to_ids(__magic_name__ )
lowercase : Any = """Encode <mask> sequence"""
lowercase : str = """Encode <mask>sequence"""
lowercase : Optional[int] = tokenizer.encode(__magic_name__ )
lowercase : List[str] = encoded.index(__magic_name__ )
lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__magic_name__ , __magic_name__ )
lowercase : Tuple = tokenizer.encode(__magic_name__ )
lowercase : List[str] = encoded.index(__magic_name__ )
lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__magic_name__ , __magic_name__ )
def __snake_case ( self :Any ) ->int:
pass
def __snake_case ( self :List[Any] ) ->str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase : int = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
lowercase : List[str] = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
lowercase : Optional[int] = """A, <mask> AllenNLP sentence."""
lowercase : Any = tokenizer_r.encode_plus(__magic_name__ , add_special_tokens=__magic_name__ , return_token_type_ids=__magic_name__ )
lowercase : str = tokenizer_p.encode_plus(__magic_name__ , add_special_tokens=__magic_name__ , return_token_type_ids=__magic_name__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowercase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowercase : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
__magic_name__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__magic_name__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __snake_case ( self :List[str] ) ->Tuple:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : str = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __magic_name__ )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __magic_name__ )
self.assertEqual(post_processor_state["""trim_offsets"""] , __magic_name__ )
def __snake_case ( self :Dict ) ->List[str]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase : Optional[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase : Optional[Any] = f"""{text_of_1_token} {text_of_1_token}"""
lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : List[Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : List[Any] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : List[Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : List[str] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ), len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Optional[int] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ), len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : Optional[int] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Union[str, Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ) + 1, 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : int = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Optional[int] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ), 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : str = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Any = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ), 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor ranging from 0 to 1; outputs a bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor ranging from -1 to 1; outputs an image tensor ranging from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
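

# Round-trip sanity check (a minimal sketch; relies only on the imports above).
# Quantizing to 8 bits and decoding back should reproduce the image up to the
# 1/255 quantization error.
def _bits_roundtrip_check() -> None:
    x = torch.rand(2, 3, 8, 8)  # a batch of images in [0, 1]
    bits = decimal_to_bits(x)  # shape (2, 3 * BITS, 8, 8), values in {-1, 1}
    y = bits_to_decimal(bits)
    assert torch.allclose(x, y, atol=1 / 255)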
def ddim_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = True, generator=None, return_dict: bool = True) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of the DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand this step.
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
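

# In formula form, the update assembled above is eq. (12) of the DDIM paper:
#   x_{t-1} = sqrt(alpha_{t-1}) * x0_pred
#             + sqrt(1 - alpha_{t-1} - sigma_t^2) * eps_theta(x_t, t)
#             + sigma_t * z, with z ~ N(0, I) added only when eta > 0.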
def ddpm_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prediction_type="epsilon", generator=None, return_dict: bool = True) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], bit_scale: Optional[float] = 1.0):
        super().__init__()

        self.bit_scale = bit_scale
        # Route scheduler.step to the bit-aware update above. Binding via __get__
        # and mirroring bit_scale onto the scheduler are assumptions made here so
        # the patched call is runnable (the step functions read `self.bit_scale`
        # with the scheduler instance as `self`).
        scheduler.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(scheduler)
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, height: Optional[int] = 256, width: Optional[int] = 256, num_inference_steps: Optional[int] = 50, generator: Optional[torch.Generator] = None, batch_size: Optional[int] = 1, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        # sample Gaussian latents and move them into the bit representation
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        # convert the bit representation back to images in [0, 1]
        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
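

# Usage sketch (an illustrative assumption, not from the original file): the
# checkpoint path below is a placeholder; any UNet2DConditionModel trained in
# bit space plus a DDIM or DDPM scheduler should plug in the same way.
#
#   from diffusers import DDIMScheduler, UNet2DConditionModel
#   unet = UNet2DConditionModel.from_pretrained("path/to/bit-unet")
#   scheduler = DDIMScheduler.from_pretrained("path/to/bit-unet", subfolder="scheduler")
#   pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]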
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( _lowercase):
def _UpperCamelCase ( self : str ) -> int:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_flax_glue.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_clm_flax.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_summarization_flax.main()
_UpperCamelCase = get_results(__UpperCamelCase , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_mlm_flax.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_ta_mlm_flax.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 )
@slow
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_flax_ner.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_qa.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , **UpperCAmelCase_ : Any) ->Tuple:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.get_tokenizer()
lowerCamelCase__: List[str] =BarkProcessor(tokenizer=UpperCAmelCase_)
processor.save_pretrained(self.tmpdirname)
lowerCamelCase__: Dict =BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Tuple =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase__: Dict =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)")
lowerCamelCase__: Any =BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Any =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase__: List[str] =35
lowerCamelCase__: Optional[Any] =2
lowerCamelCase__: Optional[Any] =8
lowerCamelCase__: Optional[int] ={
"semantic_prompt": np.ones(UpperCAmelCase_),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
"fine_prompt": np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=UpperCAmelCase_)
lowerCamelCase__: int =inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist())
# test loading voice preset from npz file
lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , "file.npz")
np.savez(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Tuple =processor(text=self.input_string , voice_preset=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist())
# test loading voice preset from the hub
lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=self.voice_preset)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: str =self.get_tokenizer()
lowerCamelCase__: Dict =BarkProcessor(tokenizer=UpperCAmelCase_)
lowerCamelCase__: List[Any] =processor(text=self.input_string)
lowerCamelCase__: Optional[int] =tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
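

# Quick check (a minimal sketch added here): the sieve should list the primes
# below the limit, and below 1000 the longest consecutive-prime sum that is
# itself prime is 953 (21 terms, starting at 7).
def _self_test() -> None:
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    assert solution(1_000) == 953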
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :int=2 , lowerCamelCase_ :List[Any]=3 , lowerCamelCase_ :Dict=6_4 , lowerCamelCase_ :Dict=None ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = np.random.default_rng(lowerCamelCase_ )
UpperCamelCase__ = length
UpperCamelCase__ = rng.normal(size=(length,) ).astype(np.floataa )
UpperCamelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self :Dict ) -> Optional[Any]:
"""simple docstring"""
return self.length
def __getitem__( self :Optional[int] , lowerCamelCase_ :str ) -> int:
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class lowerCAmelCase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[str, Any]=0 , lowerCamelCase_ :List[Any]=0 , lowerCamelCase_ :Dict=False ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCamelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCamelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCamelCase__ = True
def lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :Optional[int]=None ) -> str:
"""simple docstring"""
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
UpperCamelCase__ = False
return x * self.a[0] + self.b[0]
class lowerCAmelCase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self :int , lowerCamelCase_ :Tuple=0 , lowerCamelCase_ :Tuple=0 , lowerCamelCase_ :Optional[Any]=False ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCamelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
UpperCamelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
UpperCamelCase__ = True
def lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :Tuple=None ) -> str:
"""simple docstring"""
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
UpperCamelCase__ = False
return x * self.a + self.b
def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders (note: this test helper hardcodes the batch sizes).
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
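

# Usage sketch (assumes an Accelerate launch context and the MRPC sample CSVs
# referenced above; nothing beyond this helper is implied):
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator)
#   batch = next(iter(train_dl))  # dict of input_ids / attention_mask / labels tensors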
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A : List[str] = logging.getLogger(__name__)
A : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
A : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase :
'''simple docstring'''
A = field(
default=snake_case__ , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
A = field(
default=snake_case__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(snake_case__ )} , )
A = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A = field(
default=snake_case__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
A = field(
default=snake_case__ , metadata={'help': 'The input training data file (a text file).'} )
A = field(
default=snake_case__ , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
)
} , )
A = field(
default=snake_case__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
A = field(
default=snake_case__ , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
A = field(
default=snake_case__ , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
A = field(
default=snake_case__ , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
A = field(
default=snake_case__ , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
A = field(default=snake_case__ , metadata={'help': 'Whether ot not to use whole word mask.'} )
A = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
A = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
A = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
A = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization.'
'The training dataset will be truncated in block of this size for training.'
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
A = field(
default=snake_case__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
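

# Example of the dispatch above (hypothetical flags, for illustration only):
# `--line_by_line --train_ref_file refs.txt --whole_word_mask --mlm` yields a
# LineByLineWithRefDataset; without --line_by_line a block-wise TextDataset.
#
#   train_dataset = get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir)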
def main():
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCamelCase__ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCamelCase__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
UpperCamelCase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
UpperCamelCase__ = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
UpperCamelCase__ = AutoModelWithLMHead.from_config(_snake_case )
model.resize_token_embeddings(len(_snake_case ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
UpperCamelCase__ = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCamelCase__ = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCamelCase__ = (
get_dataset(_snake_case , tokenizer=_snake_case , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCamelCase__ = (
get_dataset(_snake_case , tokenizer=_snake_case , evaluate=_snake_case , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCamelCase__ = DataCollatorForPermutationLanguageModeling(
tokenizer=_snake_case , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCamelCase__ = DataCollatorForWholeWordMask(
tokenizer=_snake_case , mlm_probability=data_args.mlm_probability )
else:
UpperCamelCase__ = DataCollatorForLanguageModeling(
tokenizer=_snake_case , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCamelCase__ = Trainer(
model=_snake_case , args=_snake_case , data_collator=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , prediction_loss_only=_snake_case , )
# Training
if training_args.do_train:
UpperCamelCase__ = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_snake_case )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase__ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCamelCase__ = trainer.evaluate()
UpperCamelCase__ = math.exp(eval_output["eval_loss"] )
UpperCamelCase__ = {"perplexity": perplexity}
UpperCamelCase__ = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(_snake_case , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , _snake_case , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(_snake_case )
return results
def snake_case__ ( _snake_case : List[str] ):
"""simple docstring"""
main()
if __name__ == "__main__":
    main()
| 304 | 0 |
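The evaluation branch above turns the mean eval loss into perplexity via math.exp. A minimal sketch of that relationship, with an illustrative loss value rather than one produced by this script:

import math

def perplexity_from_loss(mean_nll: float) -> float:
    # Perplexity is the exponential of the mean negative log-likelihood per token.
    return math.exp(mean_nll)

eval_loss = 3.2  # illustrative; in the script this comes from trainer.evaluate()["eval_loss"]
print(perplexity_from_loss(eval_loss))  # ~24.5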
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = tf.data.AUTOTUNE
def _a ( ) -> str:
a = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=_A , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=_A , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=_A , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=_A , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=_A , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=_A , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=_A , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=_A , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=_A , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=_A , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=_A , default=1e-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=_A , default=1e-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=_A , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=_A , default=0.15 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=_A , required=_A , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=_A , help='''Model ID to upload to on the Hugging Face Hub.''' )
a = parser.parse_args()
return args
def _a ( a :int ) -> Optional[Any]:
try:
if args.tpu_name:
a = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
a = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(_A )
tf.tpu.experimental.initialize_tpu_system(_A )
return tpu
def _a ( a :Optional[int] ) -> List[Any]:
a = 0
for file in file_list:
a = file.split('''/''' )[-1]
a = re.search(r'''-\d+-(\d+)\.tfrecord''' , _A ).group(1 )
a = int(_A )
num_samples += sample_count
return num_samples
def _a ( a :int , a :Dict , a :Any , a :Dict , a :Optional[Any] , a :int=None ) -> str:
a = count_samples(_A )
a = tf.data.Dataset.from_tensor_slices(_A )
if shuffle:
a = dataset.shuffle(len(_A ) )
a = tf.data.TFRecordDataset(_A , num_parallel_reads=_A )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
a = dataset.apply(tf.data.experimental.assert_cardinality(_A ) )
a = dataset.map(_A , num_parallel_calls=_A )
if shuffle:
assert shuffle_buffer_size is not None
a = dataset.shuffle(args.shuffle_buffer_size )
a = dataset.batch(_A , drop_remainder=_A )
a = dataset.map(_A , num_parallel_calls=_A )
a = dataset.prefetch(_A )
return dataset
def _a ( a :Any ) -> List[Any]:
if not args.no_tpu:
a = initialize_tpu(_A )
a = tf.distribute.TPUStrategy(_A )
else:
a = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
a = AutoTokenizer.from_pretrained(args.tokenizer )
a = AutoConfig.from_pretrained(args.pretrained_model_config )
a = tokenizer.vocab_size
a = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(F"""No .tfrecord files found in {args.train_dataset}.""" )
a = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(F"""No .tfrecord files found in {args.eval_dataset}.""" )
a = count_samples(_A )
a = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
a = steps_per_epoch * args.num_epochs
with strategy.scope():
a = TFAutoModelForMaskedLM.from_config(_A )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
a , a = create_optimizer(
num_train_steps=_A , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=_A , metrics=['''accuracy'''] )
def decode_fn(a :List[str] ):
a = {
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(_A , _A )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
a = DataCollatorForLanguageModeling(
tokenizer=_A , mlm_probability=args.mlm_probability , mlm=_A , return_tensors='''tf''' )
def mask_with_collator(a :Tuple ):
# TF really needs an isin() function
a = (
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
a , a = data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(_A ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_A , )
return batch
a = args.per_replica_batch_size * strategy.num_replicas_in_sync
a = prepare_dataset(
_A , decode_fn=_A , mask_fn=_A , batch_size=_A , shuffle=_A , shuffle_buffer_size=args.shuffle_buffer_size , )
a = prepare_dataset(
_A , decode_fn=_A , mask_fn=_A , batch_size=_A , shuffle=_A , )
a = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_A ) )
model.fit(
_A , validation_data=_A , epochs=args.num_epochs , callbacks=_A , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
UpperCAmelCase__ = parse_args()
main(args)
| 117 |
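The count_samples helper above recovers per-shard sample counts from filenames; the "-<shard>-<count>.tfrecord" naming scheme is an assumption read off its regex. A dependency-free sketch of the same parsing:

import re

def count_samples(file_list):
    # Shard names are assumed to end in "-<shard_index>-<num_samples>.tfrecord".
    total = 0
    for path in file_list:
        filename = path.split("/")[-1]
        match = re.search(r"-\d+-(\d+)\.tfrecord", filename)
        total += int(match.group(1))
    return total

print(count_samples(["gs://bucket/train-0-1000.tfrecord", "gs://bucket/train-1-972.tfrecord"]))  # 1972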
def lowerCamelCase__ ( ):
'''simple docstring'''
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = 1
snake_case_ = 2
while i * i <= n:
snake_case_ = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def lowerCamelCase__ ( ):
'''simple docstring'''
return next(i for i in triangle_number_generator() if count_divisors(_A ) > 500 )
if __name__ == "__main__":
print(solution())
| 376 | 0 |
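The Project Euler solution above counts divisors through prime factorization: if n = p1^a1 * ... * pk^ak, then n has (a1 + 1) * ... * (ak + 1) divisors. A short standalone check of that identity:

def count_divisors(n: int) -> int:
    count, p = 1, 2
    while p * p <= n:
        multiplicity = 0
        while n % p == 0:
            n //= p
            multiplicity += 1
        count *= multiplicity + 1
        p += 1
    if n > 1:  # one prime factor larger than sqrt(n) may remain
        count *= 2
    return count

# 28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(28) == 6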
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( lowercase ):
__lowercase : List[str] = ["image_processor", "tokenizer"]
__lowercase : Any = "FlavaImageProcessor"
__lowercase : str = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase_ , )
_UpperCamelCase = kwargs.pop("feature_extractor" )
_UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = self.image_processor
def __call__( self , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = True , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = 0 , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = True , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> int:
"""simple docstring"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_UpperCamelCase = self.tokenizer(
text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
if images is not None:
_UpperCamelCase = self.image_processor(
lowerCamelCase_ , return_image_mask=lowerCamelCase_ , return_codebook_pixels=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
if text is not None and images is not None:
encoding.update(lowerCamelCase_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ) , tensor_type=lowerCamelCase_ )
def lowercase ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def lowercase ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.tokenizer.model_input_names
_UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase ( self ) -> str:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCamelCase_ , )
return self.image_processor_class
@property
def lowercase ( self ) -> Tuple:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCamelCase_ , )
return self.image_processor
| 711 |
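A hypothetical usage sketch for the combined processor above; the checkpoint id and image file are assumptions for illustration, not values taken from this file:

from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")  # assumed checkpoint id
image = Image.open("cat.png")  # any local RGB image
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
# `inputs` combines tokenizer outputs (input_ids, attention_mask) with image features (pixel_values)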
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
__lowerCAmelCase = True
from torch.cuda.amp import autocast
__lowerCAmelCase = logging.getLogger(__name__)
def _lowercase ( a__ : List[str]=None , a__ : Optional[int]=None ) -> Optional[int]:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=a__ )
@dataclass
class lowerCamelCase_ :
__lowercase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowercase : Optional[str] = field(
default=lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__lowercase : Optional[bool] = field(
default=lowercase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
__lowercase : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
__lowercase : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
__lowercase : Optional[float] = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
__lowercase : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
__lowercase : Optional[float] = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
__lowercase : Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCamelCase_ :
__lowercase : Optional[str] = field(
default=lowercase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__lowercase : Optional[str] = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
__lowercase : bool = field(
default=lowercase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__lowercase : Optional[int] = field(
default=lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__lowercase : Optional[int] = field(
default=lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__lowercase : Optional[int] = field(
default=lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
__lowercase : List[str] = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class lowerCamelCase_ :
__lowercase : WavaVecaProcessor
__lowercase : Union[bool, str] = True
__lowercase : Optional[int] = None
__lowercase : Optional[int] = None
__lowercase : Optional[int] = None
__lowercase : Optional[int] = None
def __call__( self , lowerCamelCase_ ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCamelCase = [{"input_values": feature["input_values"]} for feature in features]
_UpperCamelCase = [{"input_ids": feature["labels"]} for feature in features]
_UpperCamelCase = self.processor.pad(
lowerCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
_UpperCamelCase = self.processor.pad(
labels=lowerCamelCase_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
# replace padding with -100 to ignore loss correctly
_UpperCamelCase = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
_UpperCamelCase = labels
return batch
class lowerCamelCase_ ( lowercase ):
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ ) -> torch.Tensor:
"""simple docstring"""
model.train()
_UpperCamelCase = self._prepare_inputs(lowerCamelCase_ )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(lowerCamelCase_ , lowerCamelCase_ )
else:
_UpperCamelCase = self.compute_loss(lowerCamelCase_ , lowerCamelCase_ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCamelCase_ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCamelCase_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCamelCase_ )
else:
loss.backward()
return loss.detach()
def _lowercase ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , a__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
_UpperCamelCase = f'''[{"".join(data_args.chars_to_ignore )}]'''
def remove_special_characters(a__ : Tuple ):
_UpperCamelCase = re.sub(a__ , "" , batch["sentence"] ).lower() + " "
return batch
_UpperCamelCase = train_dataset.map(a__ , remove_columns=["sentence"] )
_UpperCamelCase = eval_dataset.map(a__ , remove_columns=["sentence"] )
def extract_all_chars(a__ : Tuple ):
_UpperCamelCase = " ".join(batch["text"] )
_UpperCamelCase = list(set(a__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
a__ , batched=a__ , batch_size=-1 , keep_in_memory=a__ , remove_columns=train_dataset.column_names , )
_UpperCamelCase = train_dataset.map(
a__ , batched=a__ , batch_size=-1 , keep_in_memory=a__ , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(a__ )}
_UpperCamelCase = vocab_dict[" "]
del vocab_dict[" "]
_UpperCamelCase = len(a__ )
_UpperCamelCase = len(a__ )
with open("vocab.json" , "w" ) as vocab_file:
json.dump(a__ , a__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=a__ , return_attention_mask=a__ )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(a__ ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(a__ ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(a__ : List[Any] ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch["path"] )
_UpperCamelCase = resampler(a__ ).squeeze().numpy()
_UpperCamelCase = 1_60_00
_UpperCamelCase = batch["text"]
return batch
_UpperCamelCase = train_dataset.map(
a__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
a__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(a__ : Dict ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), f'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
_UpperCamelCase = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(a__ )
return batch
_UpperCamelCase = train_dataset.map(
a__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=a__ , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
a__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=a__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric("wer" )
def compute_metrics(a__ : Union[str, Any] ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(a__ , axis=-1 )
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(a__ )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=a__ )
_UpperCamelCase = wer_metric.compute(predictions=a__ , references=a__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=a__ , padding=a__ )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=a__ , data_collator=a__ , args=a__ , compute_metrics=a__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=a__ )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a__ )
)
_UpperCamelCase = min(a__ , len(a__ ) )
trainer.log_metrics("train" , a__ )
trainer.save_metrics("train" , a__ )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(a__ )
_UpperCamelCase = min(a__ , len(a__ ) )
trainer.log_metrics("eval" , a__ )
trainer.save_metrics("eval" , a__ )
return results
if __name__ == "__main__":
main()
| 589 | 0 |
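The WER metric above scores greedy CTC predictions (per-frame argmax, grouped tokens) against references. A minimal NumPy sketch of greedy CTC decoding; the tiny vocabulary and one-hot "logits" are illustrative:

import numpy as np

def greedy_ctc_decode(logits, id_to_char, blank_id=0):
    # logits: (time, vocab). Pick the best id per frame, collapse repeats, drop blanks.
    ids = np.argmax(logits, axis=-1)
    collapsed = [i for k, i in enumerate(ids) if k == 0 or i != ids[k - 1]]
    return "".join(id_to_char[i] for i in collapsed if i != blank_id)

vocab = {0: "", 1: "a", 2: "b", 3: " "}
frames = np.eye(4)[[1, 1, 0, 2, 2, 3, 1]]  # one-hot frames spelling "ab a"
print(greedy_ctc_decode(frames, vocab))  # "ab a"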
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
__A = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__A = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _snake_case :
snake_case__ = field(
default=UpperCAmelCase_ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCAmelCase_ )} , )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class _snake_case :
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "The input training data file (a text file)."} )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
snake_case__ = field(default=UpperCAmelCase_ , metadata={"help": "Whether ot not to use whole word mask."} )
snake_case__ = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
snake_case__ = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
snake_case__ = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
snake_case__ = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
snake_case__ = field(
default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowercase_ ( _lowerCamelCase: List[str] , _lowerCamelCase: str , _lowerCamelCase: Tuple = False , _lowerCamelCase: Optional[int] = None , ) -> Tuple:
'''simple docstring'''
def _dataset(_lowerCamelCase: Optional[Any] , _lowerCamelCase: Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=_lowerCamelCase , file_path=_lowerCamelCase , block_size=args.block_size , ref_path=_lowerCamelCase , )
return LineByLineTextDataset(tokenizer=_lowerCamelCase , file_path=_lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_lowerCamelCase , file_path=_lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def lowercase_ ( ) -> Tuple:
'''simple docstring'''
__lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowerCamelCase : Tuple = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowerCamelCase : Any = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
__lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowerCamelCase : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
__lowerCamelCase : Any = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
__lowerCamelCase : Optional[Any] = AutoModelWithLMHead.from_config(_lowerCamelCase )
model.resize_token_embeddings(len(_lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
__lowerCamelCase : Tuple = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowerCamelCase : List[Any] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowerCamelCase : Dict = (
get_dataset(_lowerCamelCase , tokenizer=_lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowerCamelCase : Union[str, Any] = (
get_dataset(_lowerCamelCase , tokenizer=_lowerCamelCase , evaluate=_lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowerCamelCase : List[str] = DataCollatorForPermutationLanguageModeling(
tokenizer=_lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowerCamelCase : Optional[int] = DataCollatorForWholeWordMask(
tokenizer=_lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__lowerCamelCase : Union[str, Any] = DataCollatorForLanguageModeling(
tokenizer=_lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowerCamelCase : Optional[int] = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , data_collator=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , prediction_loss_only=_lowerCamelCase , )
# Training
if training_args.do_train:
__lowerCamelCase : List[str] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowerCamelCase : Optional[Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__lowerCamelCase : Any = trainer.evaluate()
__lowerCamelCase : List[str] = math.exp(eval_output["eval_loss"] )
__lowerCamelCase : Optional[Any] = {"perplexity": perplexity}
__lowerCamelCase : Optional[Any] = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , _lowerCamelCase , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(_lowerCamelCase )
return results
def lowercase_ ( _lowerCamelCase: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
    main()
| 646 |
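When --line_by_line is not set, the TextDataset used above concatenates the tokenized corpus and slices it into fixed blocks of block_size tokens. A sketch of that chunking with stand-in token ids:

def chunk_into_blocks(token_ids, block_size):
    # Drop the trailing remainder so every example is exactly block_size tokens long.
    n_blocks = len(token_ids) // block_size
    return [token_ids[i * block_size : (i + 1) * block_size] for i in range(n_blocks)]

tokens = list(range(10))  # stand-in for a tokenized corpus
print(chunk_into_blocks(tokens, 4))  # [[0, 1, 2, 3], [4, 5, 6, 7]]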
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase ="""▁"""
__UpperCAmelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowercase__ : str = BertGenerationTokenizer
lowercase__ : int = False
lowercase__ : Optional[Any] = True
def lowercase_ ( self ):
'''simple docstring'''
super().setUp()
A__ = BertGenerationTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self ):
'''simple docstring'''
A__ = "<s>"
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(UpperCamelCase__ ) , 10_02 )
def lowercase_ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def lowercase_ ( self ):
'''simple docstring'''
A__ = BertGenerationTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
A__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [2_85, 46, 10, 1_70, 3_82] , )
A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
A__ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
A__ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def lowercase_ ( self ):
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = "Hello World!"
A__ = [1_85_36, 22_60, 1_01]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
A__ = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@require_torch
@slow
def lowercase_ ( self ):
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
A__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
A__ = " ".join(UpperCamelCase__ )
A__ = self.big_tokenizer.encode_plus(UpperCamelCase__ , return_tensors="pt" , return_token_type_ids=UpperCamelCase__ )
A__ = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=UpperCamelCase__ )
A__ = BertGenerationConfig()
A__ = BertGenerationEncoder(UpperCamelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCamelCase__ )
model(**UpperCamelCase__ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 337 | 0 |
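The expected tokenizations above use "▁" as the SentencePiece word-boundary marker. A sketch of how such pieces map back to plain text, using string manipulation only:

SPIECE_UNDERLINE = "\u2581"  # the '▁' marker

def pieces_to_text(pieces):
    # Concatenate the pieces, then turn each word-boundary marker into a space.
    return "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip()

pieces = [SPIECE_UNDERLINE + "This", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "a", SPIECE_UNDERLINE + "t", "est"]
print(pieces_to_text(pieces))  # "This is a test"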
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
lowercase__ :Any = [
'good first issue',
'feature request',
'wip',
]
def lowerCamelCase_ ( ) ->str:
"""simple docstring"""
__UpperCAmelCase : str = Github(os.environ['''GITHUB_TOKEN'''] )
__UpperCAmelCase : Tuple = g.get_repo('''huggingface/accelerate''' )
__UpperCAmelCase : str = repo.get_issues(state='''open''' )
for issue in open_issues:
__UpperCAmelCase : int = sorted([comment for comment in issue.get_comments()] , key=lambda UpperCAmelCase_ : i.created_at , reverse=UpperCAmelCase_ )
__UpperCAmelCase : List[str] = comments[0] if len(UpperCAmelCase_ ) > 0 else None
__UpperCAmelCase : Optional[Any] = dt.utcnow()
__UpperCAmelCase : Optional[int] = (current_time - issue.updated_at).days
__UpperCAmelCase : Optional[Any] = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
    main()
| 374 |
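The stale-issue bot above keys every decision on whole days of inactivity. A small sketch of that datetime arithmetic with fabricated timestamps:

from datetime import datetime

updated_at = datetime(2023, 1, 1)   # illustrative issue timestamps
created_at = datetime(2022, 11, 1)
now = datetime(2023, 1, 15)

days_since_updated = (now - updated_at).days
days_since_creation = (now - created_at).days
is_stale = days_since_updated > 23 and days_since_creation >= 30
print(days_since_updated, days_since_creation, is_stale)  # 14 75 False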
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : Dict = nn.Linear(3 , 4 )
__UpperCAmelCase : Union[str, Any] = nn.BatchNormad(4 )
__UpperCAmelCase : List[str] = nn.Linear(4 , 5 )
def A_ ( self : Any , __lowercase : Any ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__lowercase ) ) )
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
def A_ ( self : Union[str, Any] , __lowercase : Optional[int] , *__lowercase : str , **__lowercase : Optional[int] ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
def A_ ( self : Any , __lowercase : Tuple , __lowercase : Any ):
'''simple docstring'''
return output + 1
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
__UpperCAmelCase : Optional[int] = ModelHook()
add_hook_to_module(__lowercase , __lowercase )
self.assertEqual(test_model._hf_hook , __lowercase )
self.assertTrue(hasattr(__lowercase , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__lowercase )
self.assertFalse(hasattr(__lowercase , '''_hf_hook''' ) )
self.assertFalse(hasattr(__lowercase , '''_old_forward''' ) )
def A_ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = ModelForTest()
__UpperCAmelCase : Tuple = ModelHook()
add_hook_to_module(__lowercase , __lowercase )
add_hook_to_module(__lowercase , __lowercase , append=__lowercase )
self.assertEqual(isinstance(test_model._hf_hook , __lowercase ) , __lowercase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__lowercase , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__lowercase )
self.assertFalse(hasattr(__lowercase , '''_hf_hook''' ) )
self.assertFalse(hasattr(__lowercase , '''_old_forward''' ) )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
__UpperCAmelCase : Tuple = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[int] = test_model(x + 1 )
__UpperCAmelCase : Optional[Any] = test_model(x + 2 )
__UpperCAmelCase : Optional[int] = PreForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Union[str, Any] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces the old hook; it does not chain
__UpperCAmelCase : int = PreForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Any = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__UpperCAmelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Tuple = test_model(__lowercase )
assert torch.allclose(__lowercase , __lowercase , atol=1e-5 )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : int = ModelForTest()
__UpperCAmelCase : List[Any] = torch.randn(2 , 3 )
__UpperCAmelCase : Tuple = test_model(__lowercase )
__UpperCAmelCase : int = PostForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Optional[int] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , output + 1 , atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces the old hook; it does not chain
__UpperCAmelCase : str = PostForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : List[str] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__UpperCAmelCase : Optional[int] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Dict = test_model(__lowercase )
assert torch.allclose(__lowercase , output + 2 , atol=1e-5 )
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ModelForTest()
__UpperCAmelCase : Union[str, Any] = torch.randn(2 , 3 )
__UpperCAmelCase : str = test_model(__lowercase )
__UpperCAmelCase : Union[str, Any] = PostForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Optional[int] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : int = test_model(__lowercase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__UpperCAmelCase : Dict = torch.randn(2 , 3 )
__UpperCAmelCase : Any = model(__lowercase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__lowercase , AlignDevicesHook(io_same_device=__lowercase ) )
__UpperCAmelCase : List[Any] = torch.randn(2 , 3 ).to(0 )
__UpperCAmelCase : int = model(__lowercase )
self.assertEqual(output.device , torch.device(0 ) )
def A_ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
__UpperCAmelCase : Tuple = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__lowercase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : Optional[int] = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , __lowercase )
__UpperCAmelCase : int = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[int] = model(__lowercase )
self.assertEqual(output.device , __lowercase )
        # Removing the hooks loads the weights back into the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linearb )
        self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
__UpperCAmelCase : str = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__lowercase ) )
        add_hook_to_module(model.linearb , AlignDevicesHook(**__lowercase ) )
        # Parameters have been offloaded, so they are on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
__UpperCAmelCase : Optional[Any] = torch.randn(2 , 3 )
__UpperCAmelCase : List[Any] = model(__lowercase )
self.assertEqual(output.device , __lowercase )
        # Removing the hooks loads the weights back into the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linearb )
        self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''cpu''' ) )
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ModelForTest()
# Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''cpu''' ) )
        # This will offload the parameters and set the execution device
        __UpperCAmelCase : Optional[Any] = 0 if torch.cuda.is_available() else '''cpu'''
        attach_align_device_hook(__lowercase , execution_device=__lowercase , offload=__lowercase )
        # Parameters have been offloaded, so they are on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''meta''' ) )
        # Buffers are not included in the offload by default, so they remain on the execution device
__UpperCAmelCase : Dict = torch.device(__lowercase )
self.assertEqual(model.batchnorm.running_mean.device , __lowercase )
__UpperCAmelCase : Optional[int] = torch.randn(2 , 3 )
__UpperCAmelCase : Dict = model(__lowercase )
self.assertEqual(output.device , __lowercase )
        # Removing the hooks loads the weights back into the model.
        remove_hook_from_submodules(__lowercase )
        self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(__lowercase , execution_device=__lowercase , offload=__lowercase , offload_buffers=__lowercase )
        # Parameters have been offloaded, so they are on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
__UpperCAmelCase : Dict = torch.randn(2 , 3 )
__UpperCAmelCase : str = model(__lowercase )
self.assertEqual(output.device , __lowercase )
        # Removing the hooks loads the weights back into the model.
        remove_hook_from_submodules(__lowercase )
        self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''cpu''' ) )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
# Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''cpu''' ) )
        # This will offload the parameters and set the execution device
        __UpperCAmelCase : str = 0 if torch.cuda.is_available() else '''cpu'''
        attach_align_device_hook(
            __lowercase , execution_device=__lowercase , offload=__lowercase , weights_map=model.state_dict() )
        # Parameters have been offloaded, so they are on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''meta''' ) )
        # Buffers are not included in the offload by default, so they remain on the execution device
__UpperCAmelCase : Optional[Any] = torch.device(__lowercase )
self.assertEqual(model.batchnorm.running_mean.device , __lowercase )
__UpperCAmelCase : Any = torch.randn(2 , 3 )
__UpperCAmelCase : Dict = model(__lowercase )
self.assertEqual(output.device , __lowercase )
        # Removing the hooks loads the weights back into the model.
        remove_hook_from_submodules(__lowercase )
        self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__lowercase , execution_device=__lowercase , offload=__lowercase , weights_map=model.state_dict() , offload_buffers=__lowercase , )
        # Parameters have been offloaded, so they are on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
__UpperCAmelCase : List[str] = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[int] = model(__lowercase )
self.assertEqual(output.device , __lowercase )
        # Removing the hooks loads the weights back into the model.
        remove_hook_from_submodules(__lowercase )
        self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linearb.weight.device , torch.device('''cpu''' ) )
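# A minimal, self-contained sketch of the post-forward-hook idea the tests
# above exercise: swap a module's forward for a version that runs a function
# on the output. `wrap_post_forward` is a hypothetical helper, not the
# accelerate API; it only mirrors the behaviour the assertions rely on.
import torch
import torch.nn as nn

def wrap_post_forward(module: nn.Module, post_fn):
    original_forward = module.forward

    def hooked_forward(*args, **kwargs):
        # Run the real forward, then let the "hook" rewrite the output.
        return post_fn(original_forward(*args, **kwargs))

    module.forward = hooked_forward
    return module

if __name__ == "__main__":
    layer = nn.Linear(3, 3)
    x = torch.randn(2, 3)
    base = layer(x)
    # Chaining two "+1" post hooks shifts the output by 2, matching the
    # SequentialHook assertion above.
    wrap_post_forward(layer, lambda out: out + 1)
    wrap_post_forward(layer, lambda out: out + 1)
    assert torch.allclose(layer(x), base + 2, atol=1e-5)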
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __A ( UpperCamelCase__ ):
a__ : BigBirdConfig
a__ : jnp.dtype = jnp.floataa
a__ : bool = True
def _lowercase (self : Dict ):
super().setup()
UpperCAmelCase_ = nn.Dense(5 , dtype=self.dtype )
def __call__(self : Optional[Any] , *__a : Tuple , **__a : List[Any] ):
UpperCAmelCase_ = super().__call__(*__a , **__a )
UpperCAmelCase_ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __A ( UpperCamelCase__ ):
a__ : str = FlaxBigBirdForNaturalQuestionsModule
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : List[str] ) -> str:
'''simple docstring'''
def cross_entropy(snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Optional[Any]=None ):
UpperCAmelCase_ = logits.shape[-1]
UpperCAmelCase_ = (labels[..., None] == jnp.arange(snake_case_ )[None]).astype("f4" )
UpperCAmelCase_ = jax.nn.log_softmax(snake_case_ , axis=-1 )
UpperCAmelCase_ = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
UpperCAmelCase_ = reduction(snake_case_ )
return loss
UpperCAmelCase_ = partial(snake_case_ , reduction=jnp.mean )
UpperCAmelCase_ = cross_entropy(snake_case_ , snake_case_ )
UpperCAmelCase_ = cross_entropy(snake_case_ , snake_case_ )
UpperCAmelCase_ = cross_entropy(snake_case_ , snake_case_ )
return (start_loss + end_loss + pooled_loss) / 3
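# Illustrative sanity check (a hypothetical helper, not part of the script):
# with a single example, the one-hot cross-entropy above reduces to minus the
# log-probability of the true class.
def _cross_entropy_sanity_check():
    logits = jnp.array([[2.0, 0.5, -1.0]])
    labels = jnp.array([0])
    one_hot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    loss = -jnp.sum(one_hot * log_probs, axis=-1)
    assert jnp.allclose(loss[0], -log_probs[0, 0])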
@dataclass
class __A :
a__ : str = "google/bigbird-roberta-base"
a__ : int = 3_000
a__ : int = 10_500
a__ : int = 128
a__ : int = 3
a__ : int = 1
a__ : int = 5
# tx_args
a__ : float = 3e-5
a__ : float = 0.0
a__ : int = 20_000
a__ : float = 0.0_0_9_5
a__ : str = "bigbird-roberta-natural-questions"
a__ : str = "training-expt"
a__ : str = "data/nq-training.jsonl"
a__ : str = "data/nq-validation.jsonl"
def _lowercase (self : str ):
os.makedirs(self.base_dir , exist_ok=__a )
UpperCAmelCase_ = os.path.join(self.base_dir , self.save_dir )
UpperCAmelCase_ = self.batch_size_per_device * jax.device_count()
@dataclass
class __A :
a__ : int
a__ : int = 4_096 # no dynamic padding on TPUs
def __call__(self : List[Any] , __a : Any ):
UpperCAmelCase_ = self.collate_fn(__a )
UpperCAmelCase_ = jax.tree_util.tree_map(__a , __a )
return batch
def _lowercase (self : Tuple , __a : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.fetch_inputs(features["input_ids"] )
UpperCAmelCase_ = {
"input_ids": jnp.array(__a , dtype=jnp.intaa ),
"attention_mask": jnp.array(__a , dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def _lowercase (self : str , __a : list ):
UpperCAmelCase_ = [self._fetch_inputs(__a ) for ids in input_ids]
return zip(*__a )
def _lowercase (self : List[Any] , __a : list ):
UpperCAmelCase_ = [1 for _ in range(len(__a ) )]
while len(__a ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any , snake_case_ : List[Any]=None ) -> Any:
'''simple docstring'''
if seed is not None:
UpperCAmelCase_ = dataset.shuffle(seed=snake_case_ )
for i in range(len(snake_case_ ) // batch_size ):
UpperCAmelCase_ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(snake_case_ )
@partial(jax.pmap , axis_name="batch" )
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] , **snake_case_ : Any ) -> Optional[int]:
'''simple docstring'''
def loss_fn(snake_case_ : Tuple ):
UpperCAmelCase_ = model_inputs.pop("start_labels" )
UpperCAmelCase_ = model_inputs.pop("end_labels" )
UpperCAmelCase_ = model_inputs.pop("pooled_labels" )
UpperCAmelCase_ = state.apply_fn(**snake_case_ , params=snake_case_ , dropout_rng=snake_case_ , train=snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = outputs
return state.loss_fn(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
UpperCAmelCase_ , UpperCAmelCase_ = jax.random.split(snake_case_ )
UpperCAmelCase_ = jax.value_and_grad(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = grad_fn(state.params )
UpperCAmelCase_ = jax.lax.pmean({"loss": loss} , axis_name="batch" )
UpperCAmelCase_ = jax.lax.pmean(snake_case_ , "batch" )
UpperCAmelCase_ = state.apply_gradients(grads=snake_case_ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def lowerCAmelCase_ ( snake_case_ : List[str] , **snake_case_ : Any ) -> str:
'''simple docstring'''
UpperCAmelCase_ = model_inputs.pop("start_labels" )
UpperCAmelCase_ = model_inputs.pop("end_labels" )
UpperCAmelCase_ = model_inputs.pop("pooled_labels" )
UpperCAmelCase_ = state.apply_fn(**snake_case_ , params=state.params , train=snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = outputs
UpperCAmelCase_ = state.loss_fn(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class __A ( train_state.TrainState ):
a__ : Callable = struct.field(pytree_node=UpperCamelCase__ )
@dataclass
class __A :
a__ : Args
a__ : Callable
a__ : Callable
a__ : Callable
a__ : Callable
a__ : wandb
a__ : Callable = None
def _lowercase (self : str , __a : Union[str, Any] , __a : List[Any] , __a : Any , __a : int=None ):
UpperCAmelCase_ = model.params
UpperCAmelCase_ = TrainState.create(
apply_fn=model.__call__ , params=__a , tx=__a , loss_fn=__a , )
if ckpt_dir is not None:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = restore_checkpoint(__a , __a )
UpperCAmelCase_ = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
UpperCAmelCase_ , UpperCAmelCase_ = build_tx(**__a )
UpperCAmelCase_ = train_state.TrainState(
step=__a , apply_fn=model.__call__ , params=__a , tx=__a , opt_state=__a , )
UpperCAmelCase_ = args
UpperCAmelCase_ = data_collator
UpperCAmelCase_ = lr
UpperCAmelCase_ = params
UpperCAmelCase_ = jax_utils.replicate(__a )
return state
def _lowercase (self : Tuple , __a : Dict , __a : str , __a : Any ):
UpperCAmelCase_ = self.args
UpperCAmelCase_ = len(__a ) // args.batch_size
UpperCAmelCase_ = jax.random.PRNGKey(0 )
UpperCAmelCase_ = jax.random.split(__a , jax.device_count() )
for epoch in range(args.max_epochs ):
UpperCAmelCase_ = jnp.array(0 , dtype=jnp.floataa )
UpperCAmelCase_ = get_batched_dataset(__a , args.batch_size , seed=__a )
UpperCAmelCase_ = 0
for batch in tqdm(__a , total=__a , desc=f"""Running EPOCH-{epoch}""" ):
UpperCAmelCase_ = self.data_collator(__a )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.train_step_fn(__a , __a , **__a )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
UpperCAmelCase_ = jax_utils.unreplicate(state.step )
UpperCAmelCase_ = running_loss.item() / i
UpperCAmelCase_ = self.scheduler_fn(state_step - 1 )
UpperCAmelCase_ = self.evaluate(__a , __a )
UpperCAmelCase_ = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(__a ) )
self.logger.log(__a , commit=__a )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f"""-e{epoch}-s{i}""" , state=__a )
def _lowercase (self : List[str] , __a : List[Any] , __a : Any ):
UpperCAmelCase_ = get_batched_dataset(__a , self.args.batch_size )
UpperCAmelCase_ = len(__a ) // self.args.batch_size
UpperCAmelCase_ = jnp.array(0 , dtype=jnp.floataa )
UpperCAmelCase_ = 0
for batch in tqdm(__a , total=__a , desc="Evaluating ... " ):
UpperCAmelCase_ = self.data_collator(__a )
UpperCAmelCase_ = self.val_step_fn(__a , **__a )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def _lowercase (self : Optional[int] , __a : List[str] , __a : int ):
UpperCAmelCase_ = jax_utils.unreplicate(__a )
print(f"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... " )
self.model_save_fn(__a , params=state.params )
with open(os.path.join(__a , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__a , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(__a , "data_collator.joblib" ) )
with open(os.path.join(__a , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , __a )
print("DONE" )
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any ) -> Optional[int]:
'''simple docstring'''
print(f"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " )
with open(os.path.join(snake_case_ , "flax_model.msgpack" ) , "rb" ) as f:
UpperCAmelCase_ = from_bytes(state.params , f.read() )
with open(os.path.join(snake_case_ , "opt_state.msgpack" ) , "rb" ) as f:
UpperCAmelCase_ = from_bytes(state.opt_state , f.read() )
UpperCAmelCase_ = joblib.load(os.path.join(snake_case_ , "args.joblib" ) )
UpperCAmelCase_ = joblib.load(os.path.join(snake_case_ , "data_collator.joblib" ) )
with open(os.path.join(snake_case_ , "training_state.json" ) , "r" ) as f:
UpperCAmelCase_ = json.load(snake_case_ )
UpperCAmelCase_ = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : str ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = num_train_steps - warmup_steps
UpperCAmelCase_ = optax.linear_schedule(init_value=snake_case_ , end_value=snake_case_ , transition_steps=snake_case_ )
UpperCAmelCase_ = optax.linear_schedule(init_value=snake_case_ , end_value=1E-7 , transition_steps=snake_case_ )
UpperCAmelCase_ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int ) -> Optional[Any]:
'''simple docstring'''
def weight_decay_mask(snake_case_ : Any ):
UpperCAmelCase_ = traverse_util.flatten_dict(snake_case_ )
UpperCAmelCase_ = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(snake_case_ )
UpperCAmelCase_ = scheduler_fn(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = optax.adamw(learning_rate=snake_case_ , weight_decay=snake_case_ , mask=snake_case_ )
return tx, lr
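# Illustrative sketch of the schedule built above, with toy step counts rather
# than the script's configuration: linear warmup to the peak rate, then linear
# decay after the boundary step.
def _schedule_sketch():
    warmup = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=10)
    decay = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=90)
    lr = optax.join_schedules(schedules=[warmup, decay], boundaries=[10])
    assert float(lr(0)) == 0.0  # start of warmup
    assert abs(float(lr(10)) - 3e-5) < 1e-9  # peak right after warmup
    assert float(lr(50)) < 3e-5  # decaying afterwards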
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = """▁"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""google/reformer-crime-and-punishment""": 52_42_88,
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ['''input_ids''', '''attention_mask''']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
__magic_name__ :Optional[Any] = vocab_file
__magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def A ( self ):
"""simple docstring"""
__magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.__dict__.copy()
__magic_name__ :Optional[Any] = None
return state
def __setstate__( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__magic_name__ :Optional[int] = {}
__magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
__magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase )
return token
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = []
__magic_name__ :Tuple = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
__magic_name__ :Optional[Any] = []
else:
current_sub_tokens.append(__lowerCAmelCase )
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ :Optional[int] = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
__magic_name__ :Dict = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
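# Standalone sketch of the convert_tokens_to_string loop above: sub-tokens are
# buffered, and the buffer is flushed whenever a special token appears, so
# special tokens are never run through the SentencePiece model. `fake_decode`
# is a stand-in for the SentencePiece decoder.
def join_tokens(tokens, special_tokens):
    def fake_decode(pieces):
        return "".join(pieces).replace("▁", " ")

    out, buffer = "", []
    for token in tokens:
        if token in special_tokens:
            out += fake_decode(buffer) + token
            buffer = []
        else:
            buffer.append(token)
    out += fake_decode(buffer)
    return out.strip()

assert join_tokens(["▁Hello", "▁world", "</s>"], {"</s>"}) == "Hello world</s>"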
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __SCREAMING_SNAKE_CASE :
    data: int
    left: "__SCREAMING_SNAKE_CASE | None" = None
    right: "__SCREAMING_SNAKE_CASE | None" = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def __a ( root ):
"""simple docstring"""
if root is None:
return 0
# Validation
    def count_nodes(node ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_change = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        coins_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_change , coins_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
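# Illustrative usage: the classic three-node case, with 3 coins at the root
# and empty leaves; two moves (one coin down each edge) balance the tree.
assert __a(__SCREAMING_SNAKE_CASE(3, __SCREAMING_SNAKE_CASE(0), __SCREAMING_SNAKE_CASE(0))) == 2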
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __a ( _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Any = os.path.join(args.tf_model_dir , '''parameters.json''' )
lowerCamelCase__ : Optional[Any] = json.loads(open(_lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith('''.pt''' ):
lowerCamelCase__ : Any = args.output + '''.pt'''
lowerCamelCase__ : List[str] = OrderedDict()
with tf.device('''/CPU:0''' ):
lowerCamelCase__ : List[str] = tf.train.load_checkpoint(args.tf_model_dir )
lowerCamelCase__ : Tuple = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCamelCase__ : Any = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowerCamelCase__ : Tuple = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowerCamelCase__ : Union[str, Any] = 8
                    lowerCamelCase__ : Optional[Any] = '''model.sqout.%d.weight''' % (player * 2) # feeds an nn.Sequential with Tanh, so 2 at a time
lowerCamelCase__ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : List[str] = torch.tensor(_lowercase )
elif key_name.startswith('''model/moe''' ):
lowerCamelCase__ : Dict = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowerCamelCase__ : Any = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowerCamelCase__ : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/softmlp/kernel''' ):
lowerCamelCase__ : Optional[Any] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowerCamelCase__ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowerCamelCase__ : Optional[int] = key_name[-9:-7]
for i in range(16 ):
lowerCamelCase__ : str = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowerCamelCase__ : Union[str, Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith('''model/mlp''' ):
lowerCamelCase__ : Dict = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowerCamelCase__ : Dict = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowerCamelCase__ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Tuple = torch.tensor(_lowercase )
elif key_name.endswith('''/p1/bias''' ):
lowerCamelCase__ : Union[str, Any] = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowerCamelCase__ : Dict = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Union[str, Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/p2/kernel''' ):
lowerCamelCase__ : Tuple = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowerCamelCase__ : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Any = torch.tensor(_lowercase )
elif key_name.endswith('''/p2/bias''' ):
lowerCamelCase__ : int = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowerCamelCase__ : Dict = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : List[Any] = torch.tensor(_lowercase )
elif key_name.startswith('''model/ln''' ):
lowerCamelCase__ : Tuple = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCamelCase__ : Union[str, Any] = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowerCamelCase__ : Tuple = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : int = torch.tensor(_lowercase )
elif key_name.endswith('''/g''' ):
lowerCamelCase__ : Tuple = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowerCamelCase__ : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : List[str] = torch.tensor(_lowercase )
elif key_name.startswith('''model/att''' ):
lowerCamelCase__ : str = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowerCamelCase__ : List[Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCamelCase__ : Optional[Any] = state[:, 0, :, :]
lowerCamelCase__ : int = state[:, 1, :, :]
lowerCamelCase__ : Optional[int] = state[:, 2, :, :]
lowerCamelCase__ : str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : str = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Tuple = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Optional[Any] = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
lowerCamelCase__ : str = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
lowerCamelCase__ : Union[str, Any] = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowerCamelCase__ : int = torch.tensor(_lowercase )
elif key_name.endswith('''/o/kernel''' ):
lowerCamelCase__ : List[str] = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowerCamelCase__ : Any = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Any = torch.tensor(_lowercase )
elif key_name.startswith('''model/an''' ):
lowerCamelCase__ : str = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCamelCase__ : int = '''model.blocks.%d.self_attn.norm.bias''' % player
lowerCamelCase__ : Tuple = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/g''' ):
lowerCamelCase__ : int = '''model.blocks.%d.self_attn.norm.weight''' % player
lowerCamelCase__ : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Any = torch.tensor(_lowercase )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowerCamelCase__ : Optional[int] = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowerCamelCase__ : List[Any] = '''model.%s.weight''' % nlayer
lowerCamelCase__ : List[Any] = vnp.copy() # same in embedded
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
if key_name.startswith('''model/wte''' ):
lowerCamelCase__ : str = '''lm_head.weight'''
lowerCamelCase__ : Dict = vnp.copy() # same in embedded
lowerCamelCase__ : List[str] = torch.tensor(_lowercase )
elif key_name.startswith('''model/wob''' ):
lowerCamelCase__ : List[Any] = '''final_logits_bias'''
lowerCamelCase__ : List[str] = vnp.copy() # same in embedded
lowerCamelCase__ : int = state.reshape((1, -1) )
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
lowerCamelCase__ : List[Any] = '''model.last_project.weight'''
lowerCamelCase__ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
lowerCamelCase__ : Dict = '''model.last_project.bias'''
lowerCamelCase__ : Tuple = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
torch.save(_lowercase , args.output )
if __name__ == "__main__":
UpperCAmelCase : Tuple = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
UpperCAmelCase : Any = parser.parse_args()
convert_tf_gptsan_to_pt(args)
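# Illustrative sketch of the fused-QKV split above, with toy shapes: the TF
# kernel is (hidden, 3, heads, head_dim); slicing out q/k/v, flattening the
# head axes, and transposing yields a PyTorch-style (out_features, in_features)
# weight, matching the reshape/transpose calls in the converter.
import numpy as np

hidden, heads, head_dim = 6, 2, 4
qkv = np.random.rand(hidden, 3, heads, head_dim).astype(np.float32)
q = qkv[:, 0, :, :]
q_weight = q.reshape(hidden, heads * head_dim).transpose(1, 0).copy()
assert q_weight.shape == (heads * head_dim, hidden)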
"""simple docstring"""
import math
def prime_sieve( n ):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( limit = 999_966_663_333 ):
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
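# Illustrative brute-force cross-check for small limits, written straight from
# the definition behind the windowed sums above: count n when it is divisible
# by exactly one of lps(n) (largest prime <= sqrt(n)) and ups(n) (smallest
# prime >= sqrt(n)).
def brute_force(limit):
    primes = prime_sieve(math.isqrt(limit) + 100)
    total = 0
    for n in range(4, limit + 1):
        root = math.sqrt(n)
        lps = max(p for p in primes if p <= root)
        ups = min(p for p in primes if p >= root)
        if (n % lps == 0) != (n % ups == 0):
            total += n
    return total

assert brute_force(1_000) == solution(1_000)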
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''spiece.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class __UpperCAmelCase ( _UpperCamelCase ):
def __init__( self : str , a_ : Dict , a_ : List[str]=False , a_ : Any=True , a_ : int=False , a_ : Union[str, Any]="<s>" , a_ : Optional[int]="</s>" , a_ : int="<unk>" , a_ : List[Any]="<sep>" , a_ : Dict="<pad>" , a_ : Any="<cls>" , a_ : Optional[Any]="<mask>" , a_ : int=["<eop>", "<eod>"] , a_ : Optional[Dict[str, Any]] = None , **a_ : int , ) -> None:
'''simple docstring'''
a__ : List[str] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
a__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
a__ : Union[str, Any] = 3
a__ : Dict = do_lower_case
a__ : Union[str, Any] = remove_space
a__ : int = keep_accents
a__ : str = vocab_file
a__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
a__ : Optional[int] = jieba
a__ : Optional[int] = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> List[str]:
'''simple docstring'''
a__ : Tuple = self.__dict__.copy()
a__ : Union[str, Any] = None
return state
def __setstate__( self : Tuple , a_ : int ) -> List[str]:
'''simple docstring'''
a__ : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__ : str = {}
a__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self : List[Any] , a_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
if self.remove_space:
a__ : Union[str, Any] = " ".join(inputs.strip().split() )
else:
a__ : Optional[Any] = inputs
a__ : List[str] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
a__ : Union[str, Any] = unicodedata.normalize("NFKD" , a_ )
a__ : Union[str, Any] = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
if self.do_lower_case:
a__ : List[Any] = outputs.lower()
return outputs
def UpperCAmelCase ( self : Any , a_ : str ) -> List[str]:
'''simple docstring'''
a__ : Optional[Any] = self.preprocess_text(a_ )
a__ : Dict = self.sp_model.encode(a_ , out_type=a_ )
a__ : Optional[Any] = []
for piece in pieces:
if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
a__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a__ : List[str] = cur_pieces[1:]
else:
a__ : List[str] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a_ )
else:
new_pieces.append(a_ )
return new_pieces
def UpperCAmelCase ( self : int , a_ : Dict ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(a_ )
def UpperCAmelCase ( self : Dict , a_ : Tuple ) -> List[Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(a_ )
def UpperCAmelCase ( self : Union[str, Any] , a_ : List[Any] ) -> str:
'''simple docstring'''
a__ : Optional[Any] = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def UpperCAmelCase ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[Any] = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase ( self : int , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
return ([0] * len(a_ )) + [1, 1]
def UpperCAmelCase ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[str] = [self.sep_token_id]
a__ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase ( self : Dict , a_ : str , a_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
a__ : Optional[int] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
a__ : int = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
def UpperCAmelCase ( self : str , *a_ : Union[str, Any] , **a_ : Any ) -> int:
'''simple docstring'''
a__ : Optional[int] = super()._decode(*a_ , **a_ )
a__ : Tuple = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
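# Standalone sketch of the accent-stripping branch in preprocess_text above:
# NFKD decomposition separates base characters from combining marks, which
# are then dropped.
import unicodedata

decomposed = unicodedata.normalize("NFKD", "café")
stripped = "".join(c for c in decomposed if not unicodedata.combining(c))
assert stripped == "cafe"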
"""simple docstring"""
def lowercase ( column_title : str ):
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
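# An equivalent formulation that scans left to right, so no explicit powers of
# 26 are needed: each step shifts the accumulator by one base-26 digit.
# `excel_title_to_number` is a name chosen here for clarity.
def excel_title_to_number(title: str) -> int:
    number = 0
    for ch in title:
        number = number * 26 + (ord(ch) - ord("A") + 1)
    return number

assert excel_title_to_number("A") == lowercase("A") == 1
assert excel_title_to_number("AB") == lowercase("AB") == 28
assert excel_title_to_number("ZY") == lowercase("ZY") == 701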
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
@property
def A ( self : str ) -> List[str]:
torch.manual_seed(0 )
lowercase_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def A ( self : List[Any] ) -> str:
lowercase_ : Any = self.dummy_uncond_unet
lowercase_ : List[Any] = ScoreSdeVeScheduler()
lowercase_ : int = ScoreSdeVePipeline(unet=A , scheduler=A )
sde_ve.to(A )
sde_ve.set_progress_bar_config(disable=A )
lowercase_ : int = torch.manual_seed(0 )
lowercase_ : Dict = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=A ).images
lowercase_ : Union[str, Any] = torch.manual_seed(0 )
lowercase_ : List[Any] = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=A , return_dict=A )[
0
]
lowercase_ : int = image[0, -3:, -3:, -1]
lowercase_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ : Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : Any ) -> int:
lowercase_ : Any = '''google/ncsnpp-church-256'''
lowercase_ : int = UNetaDModel.from_pretrained(A )
lowercase_ : Dict = ScoreSdeVeScheduler.from_pretrained(A )
lowercase_ : Optional[Any] = ScoreSdeVePipeline(unet=A , scheduler=A )
sde_ve.to(A )
sde_ve.set_progress_bar_config(disable=A )
lowercase_ : Optional[int] = torch.manual_seed(0 )
lowercase_ : Tuple = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=A ).images
lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
lowercase_ : Union[str, Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
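# Illustrative sketch of the slice-comparison idiom used in the tests above:
# fix every seed, cut a small corner from the generated image, and compare it
# to a stored reference within a loose tolerance. `expected` here is a
# stand-in for the hard-coded reference array a real test would keep.
import numpy as np
import torch

torch.manual_seed(0)
image = torch.rand(1, 32, 32, 3).numpy()
image_slice = image[0, -3:, -3:, -1]
expected = image_slice.copy()
assert np.abs(image_slice.flatten() - expected.flatten()).max() < 1e-2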
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
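# Illustrative sketch of the lazy-import idea behind _LazyModule, in plain
# Python via PEP 562: a module-level __getattr__ defers the import until an
# attribute is first accessed. `math` stands in for a heavy optional backend.
import importlib

_EXPORTS = {"sqrt": "math"}  # exported name -> providing module

def __getattr__(name):
    if name in _EXPORTS:
        module = importlib.import_module(_EXPORTS[name])
        return getattr(module, name)
    raise AttributeError(name)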
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = '''▁'''
UpperCamelCase__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
UpperCamelCase__ = {
'''facebook/s2t-small-librispeech-asr''': 1024,
}
UpperCamelCase__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
UpperCamelCase__ = {'''mustc''': MUSTC_LANGS}
class __snake_case ( snake_case__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = MAX_MODEL_INPUT_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = []
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<unk>" , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase = None , **_UpperCamelCase , ) -> None:
"""simple docstring"""
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , do_upper_case=_UpperCamelCase , do_lower_case=_UpperCamelCase , tgt_lang=_UpperCamelCase , lang_codes=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
__snake_case = do_upper_case
__snake_case = do_lower_case
__snake_case = load_json(_UpperCamelCase )
__snake_case = {v: k for k, v in self.encoder.items()}
__snake_case = spm_file
__snake_case = load_spm(_UpperCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case = lang_codes
__snake_case = LANGUAGES[lang_codes]
__snake_case = [F'<lang:{lang}>' for lang in self.langs]
__snake_case = {lang: self.sp_model.PieceToId(F'<lang:{lang}>' ) for lang in self.langs}
__snake_case = self.lang_tokens
__snake_case = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case = {}
@property
def a ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
@property
def a ( self ) -> str:
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def a ( self , _UpperCamelCase ) -> None:
"""simple docstring"""
__snake_case = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCamelCase )
def a ( self , _UpperCamelCase ) -> None:
"""simple docstring"""
__snake_case = self.lang_code_to_id[tgt_lang]
__snake_case = [lang_code_id]
def a ( self , _UpperCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def a ( self , _UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
return self.encoder.get(_UpperCamelCase , self.encoder[self.unk_token] )
def a ( self , _UpperCamelCase ) -> str:
"""simple docstring"""
return self.decoder.get(_UpperCamelCase , self.unk_token )
def a ( self , _UpperCamelCase ) -> str:
"""simple docstring"""
__snake_case = []
__snake_case = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case = self.sp_model.decode(_UpperCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case = []
else:
current_sub_tokens.append(_UpperCamelCase )
__snake_case = self.sp_model.decode(_UpperCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def a ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def a ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
__snake_case = [1] * len(self.prefix_tokens )
__snake_case = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCamelCase )) + ([0] * len(_UpperCamelCase )) + suffix_ones
def a ( self ) -> Dict:
"""simple docstring"""
__snake_case = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
"""simple docstring"""
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__( self , _UpperCamelCase ) -> None:
"""simple docstring"""
__snake_case = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case = {}
__snake_case = load_spm(self.spm_file , self.sp_model_kwargs )
def a ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
__snake_case = Path(_UpperCamelCase )
assert save_dir.is_dir(), F'{save_directory} should be a directory'
__snake_case = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
__snake_case = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , _UpperCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCamelCase , """wb""" ) as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (str(_UpperCamelCase ), str(_UpperCamelCase ))
def lowerCamelCase__ ( __A :str ,__A :Dict[str, Any] ):
"""simple docstring"""
__snake_case = sentencepiece.SentencePieceProcessor(**__A )
spm.Load(str(__A ) )
return spm
def lowerCamelCase__ ( __A :str ):
"""simple docstring"""
with open(__A ,"""r""" ) as f:
return json.load(__A )
def lowerCamelCase__ ( __A :Optional[Any] ,__A :str ):
"""simple docstring"""
with open(__A ,"""w""" ) as f:
json.dump(__A ,__A ,indent=2 )
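# Illustrative sketch of the special-tokens mask built above, with plain
# lists: one 1 per language-prefix token, 0 for every real token, and a
# trailing 1 for the EOS token.
def special_tokens_mask_sketch(num_tokens, num_prefix=1):
    return [1] * num_prefix + [0] * num_tokens + [1]

assert special_tokens_mask_sketch(4) == [1, 0, 0, 0, 0, 1]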
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
snake_case_ : Optional[Any] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = ['OwlViTFeatureExtractor']
snake_case_ : Union[str, Any] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m]) == 2:
m -= 1
return peak(lst[m:])
# decreasing
else:
if len(lst[:m]) == 2:
m += 1
return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
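# Illustrative checks; inputs are assumed strictly unimodal (rising, then
# falling), which the divide-and-conquer above relies on.
assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5
assert peak([1, 10, 9, 8, 7, 6]) == 10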
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UpperCamelCase__ :
"""simple docstring"""
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        '''simple docstring'''
        return T5Config.from_pretrained('google/umt5-base' )
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
        '''simple docstring'''
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
    def get_pipeline_config(self):
        '''simple docstring'''
        return T5Config(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
        '''simple docstring'''
        return T5Config(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        '''simple docstring'''
        model = UMT5Model(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        '''simple docstring'''
        model = UMT5Model(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_model_fp16_forward( self , config , input_dict , ):
        '''simple docstring'''
        model = UMT5Model(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMT5ModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        '''simple docstring'''
        self.model_tester = UMT5ModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def test_export_to_onnx(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=True , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def test_model_fp16_forward(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs )
    def test_generate_with_head_masking(self):
        '''simple docstring'''
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            'head_mask': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
    def test_disk_offload(self):
        '''simple docstring'''
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def test_small_integration_test(self):
        '''simple docstring'''
        model = UMT5ForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=False , legacy=False )
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text , return_tensors='pt' , padding=True ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
| 444 |
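# A minimal sketch (hypothetical, outside the unittest harness) of the cached-
# decoding equivalence the tester above checks: running the decoder on the full
# sequence must match running it on only the newest token plus past_key_values.
import torch
from transformers import UMT5Config, UMT5Model  # assumes transformers with UMT5 support

config = UMT5Config(vocab_size=99, d_model=32, d_kv=8, d_ff=37, num_layers=2, num_heads=4)
decoder = UMT5Model(config).get_decoder().eval()
ids = torch.randint(1, 99, (1, 5))
with torch.no_grad():
    full = decoder(ids).last_hidden_state
    past = decoder(ids[:, :-1], use_cache=True).past_key_values
    step = decoder(ids[:, -1:], past_key_values=past).last_hidden_state
assert torch.allclose(full[:, -1], step[:, 0], atol=1e-3)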
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 10_24,
"""gpt2-medium""": 10_24,
"""gpt2-large""": 10_24,
"""gpt2-xl""": 10_24,
"""distilgpt2""": 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop('add_bos_token' , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids(self , conversation ):
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 444 | 1 |
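# Quick round-trip illustration (assumes network access to the public 'gpt2'
# checkpoint): GPT-2's byte-level BPE decodes back to the exact input text.
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2")
ids = tok("Hello world").input_ids
assert tok.decode(ids) == "Hello world"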
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin ):
    """simple docstring"""
    feature_extractor_class = 'WhisperFeatureExtractor'
    tokenizer_class = 'WhisperTokenizer'
    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        """simple docstring"""
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    def get_prompt_ids( self , text , return_tensors="np" ):
        """simple docstring"""
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
| 362 |
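# Hypothetical usage sketch of the processor above (assumes the public
# openai/whisper-tiny checkpoint): audio is routed to the feature extractor,
# text to the tokenizer, and passing both yields features plus 'labels'.
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
batch = processor(audio=audio, sampling_rate=16000, text="hello", return_tensors="pt")
assert "input_features" in batch and "labels" in batch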
import math
def main() -> None:
    '''simple docstring'''
    message = input('''Enter message: ''' )
    key = int(input(F'Enter key [2-{len(message ) - 1}]: ' ) )
    mode = input('''Encryption/Decryption [e/d]: ''' )
    if mode.lower().startswith('''e''' ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith('''d''' ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F'Output:\n{text + "|"}' )
def encrypt_message(key: int , message: str ) -> str:
    '''
    >>> encrypt_message(6, 'Harshil Darji')
    'Hlia rDsahrij'
    '''
    cipher_text = [''] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message(key: int , message: str ) -> str:
    '''
    >>> decrypt_message(6, 'Hlia rDsahrij')
    'Harshil Darji'
    '''
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 362 | 1 |
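# Round-trip check for the columnar transposition above: with key=4,
# "HELLO WORLD" is read out column by column, and decryption re-folds it.
msg = "HELLO WORLD"
assert decrypt_message(4, encrypt_message(4, msg)) == msg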
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float , p: int ) -> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input('enter the numbers of values: ' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('enter the values of parameters in a list: ' )
    x = list(map(int , input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('enter the value to interpolate: ' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f'the value at {value} is {summ}' )
if __name__ == "__main__":
    main()
| 8 |
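# Sanity sketch for ucal above: it builds the rising product u(u-1)...(u-p+1)
# used by Newton's forward-difference formula.
assert ucal(2.0, 3) == 2.0 * (2.0 - 1) * (2.0 - 2)  # = 0.0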
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30 | 0
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str ) -> str:
    """simple docstring"""
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('Must be 10 characters long' )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12' )
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31' )
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
    # Response
    response = F"Your date {date_input}, is a {days[str(f )]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
zeller(args.date_input)
| 183 |
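# Worked example for zeller above (a known date): 01-01-2000 fell on a Saturday,
# and the function cross-checks its own arithmetic against datetime first.
assert zeller("01-01-2000") == "Your date 01-01-2000, is a Saturday!"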
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
return values
class TvltFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=44100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
            speech_inputs = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , 'spectrogram_length' ) )
        self.assertTrue(hasattr(feature_extractor , 'feature_size' ) )
        self.assertTrue(hasattr(feature_extractor , 'num_audio_channels' ) )
        self.assertTrue(hasattr(feature_extractor , 'hop_length' ) )
        self.assertTrue(hasattr(feature_extractor , 'chunk_length' ) )
        self.assertTrue(hasattr(feature_extractor , 'sampling_rate' ) )
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('mel_filters' )
        mel_second = dict_second.pop('mel_filters' )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('mel_filters' )
        mel_second = dict_second.pop('mel_filters' )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='np' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors='np' , sampling_rate=44100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='np' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self , num_samples ):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors='pt' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1E-4 ) )
| 183 | 1 |
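# Quick shape check for the floats_list helper above: it returns a nested list
# of random floats with the requested (batch, length) shape.
sample = floats_list((2, 3))
assert len(sample) == 2 and len(sample[0]) == 3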
from __future__ import annotations
def prime_factors(n: int ) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 |
"""simple docstring"""
from manim import *
class Stage5(Scene ):  # class name is a guess; a manim Scene subclass must override construct()
    def construct(self):
_lowerCAmelCase : Any = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase : str = Rectangle(height=0.2_5 , width=0.2_5 )
_lowerCAmelCase : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Tuple = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : str = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Any = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : List[str] = Text("""CPU""" , font_size=24 )
_lowerCAmelCase : Union[str, Any] = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
_lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(4 )]
_lowerCAmelCase : Any = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : int = Text("""GPU""" , font_size=24 )
_lowerCAmelCase : Dict = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
gpu.move_to([-1, -1, 0] )
self.add(a__ )
_lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : str = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Optional[Any] = Text("""Model""" , font_size=24 )
_lowerCAmelCase : Any = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.add(a__ )
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : str = []
_lowerCAmelCase : List[Any] = []
for i, rect in enumerate(a__ ):
rect.set_stroke(a__ )
_lowerCAmelCase : str = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=a__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a__ , buff=0.0 )
self.add(a__ )
model_cpu_arr.append(a__ )
self.add(*a__ , *a__ , *a__ )
_lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Optional[Any] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Optional[Any] = Text("""Loaded Checkpoint""" , font_size=24 )
_lowerCAmelCase : List[Any] = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(a__ )
_lowerCAmelCase : int = []
_lowerCAmelCase : List[str] = []
for i, rect in enumerate(a__ ):
_lowerCAmelCase : Optional[Any] = fill.copy().set_fill(a__ , opacity=0.7 )
target.move_to(a__ )
ckpt_arr.append(a__ )
_lowerCAmelCase : Dict = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a__ )
self.add(*a__ , *a__ )
_lowerCAmelCase : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase : int = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a__ , a__ )
_lowerCAmelCase : List[str] = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a__ )
_lowerCAmelCase : List[str] = MarkupText(
F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_lowerCAmelCase : int = [meta_mem.copy() for i in range(6 )]
_lowerCAmelCase : str = [meta_mem.copy() for i in range(6 )]
_lowerCAmelCase : str = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Optional[int] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : List[Any] = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : str = Text("""Disk""" , font_size=24 )
_lowerCAmelCase : str = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(a__ , run_time=3 ) , Write(a__ , run_time=1 ) , Create(a__ , run_time=1 ) )
_lowerCAmelCase : Any = []
for i, rect in enumerate(a__ ):
_lowerCAmelCase : str = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a__ , run_time=1.5 ) )
self.play(*a__ )
self.play(FadeOut(a__ ) )
_lowerCAmelCase : Union[str, Any] = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ , run_time=3 ) )
self.play(
FadeOut(a__ , a__ , *a__ , *a__ ) , )
self.wait()
| 213 | 0 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum ):
    '''simple docstring'''
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput ):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
class SchedulerMixin:
    '''simple docstring'''
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , return_commit_hash=True , **kwargs , )
        return cls.from_config(config , return_unused_kwargs=return_unused_kwargs , **kwargs )
    def save_pretrained( self , save_directory , push_to_hub = False , **kwargs ):
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
    @property
    def compatibles(self):
        return self._get_compatibles()
    @classmethod
    def _get_compatibles(cls ):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('.' )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
| 717 |
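# A minimal round-trip sketch of the config machinery above (assumes the
# diffusers package with DDPMScheduler installed):
import tempfile

from diffusers import DDPMScheduler

sched = DDPMScheduler(num_train_timesteps=1000)
with tempfile.TemporaryDirectory() as tmp:
    sched.save_pretrained(tmp)
    reloaded = DDPMScheduler.from_pretrained(tmp)
assert reloaded.config.num_train_timesteps == 1000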
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments:
    '''simple docstring'''
    csv_file: str = field(
        metadata={"""help""": """The csv file to plot."""} , )
    plot_along_batch: bool = field(
        default=False , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , )
    is_time: bool = field(
        default=False , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , )
    no_log_scale: bool = field(
        default=False , metadata={"""help""": """Disable logarithmic scale when plotting"""} , )
    is_train: bool = field(
        default=False , metadata={
            """help""": """Whether the csv file has training results or inference results. Defaults to inference results."""
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} )
def can_convert_to_int(string ):
    '''simple docstring'''
    try:
        int(string )
        return True
    except ValueError:
        return False
def can_convert_to_float(string ):
    '''simple docstring'''
    try:
        float(string )
        return True
    except ValueError:
        return False
class Plot:
    '''simple docstring'''
    def __init__( self , args ):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline='' ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
                if can_convert_to_int(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row['batch_size'] ), int(row['sequence_length'] ))] = int(
                        row['result'] )
                elif can_convert_to_float(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row['batch_size'] ), int(row['sequence_length'] ))] = float(
                        row['result'] )
    def plot(self):
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log' )
            ax.set_yscale('log' )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz'] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len'] ) )
            results = self.result_dict[model_name]['result']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(x_axis_array , y_axis_array , '--' )
            title_str += F''' {label_model_name} vs.'''
        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def main():
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
| 424 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__( self , parent , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFDistilBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
        else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 48 |
UNIT_SYMBOL = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def length_conversion(value: float , from_type: str , to_type: str ) -> float:
    from_sanitized = from_type.lower().strip("s" )
    to_sanitized = to_type.lower().strip("s" )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'from_type' value: {from_type!r}.\n"""
            F"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'to_type' value: {to_type!r}.\n"""
            F"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 367 | 0 |
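# Worked examples for the conversion above: exponents subtract, so kilometre to
# metre multiplies by 10**3 and metre to kilometre divides by it.
assert length_conversion(1, "kilometer", "meter") == 1000
assert abs(length_conversion(4, "meter", "kilometer") - 0.004) < 1e-12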
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
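# Example of what the regex in get_mobilenet_v1_config extracts (illustrative):
# "mobilenet_v1_0.75_192" -> depth_multiplier=0.75, image_size=192.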
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
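    # Example invocation (paths are illustrative placeholders):
    #   python convert_original_tf_checkpoint_to_pytorch.py \
    #       --model_name mobilenet_v1_1.0_224 \
    #       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
    #       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224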
| 710 | """simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    # a_i is the digit array of a(i) written as b * 10^k + c;
    # ds_b is digitsum(b), c the low-order part below 10^k.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]

                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # add `addend` into the digit array `digits`, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
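# Worked example of the recurrence a(n+1) = a(n) + digitsum(a(n)) that the code
# above accelerates: 1 -> 2 -> 4 -> 8 -> 16 -> 23 -> 28 -> 38 -> 49 -> 62,
# so solution(10) should return 62.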
if __name__ == "__main__":
print(F"""{solution() = }""")
| 632 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = XLMRobertaTokenizer(A , keep_accents=A )
lowerCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __A ( self ) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase = self.rust_tokenizer_class.from_pretrained(A , **A )
lowerCamelCase = self.tokenizer_class.from_pretrained(A , **A )
lowerCamelCase = tempfile.mkdtemp()
lowerCamelCase = tokenizer_r.save_pretrained(A )
lowerCamelCase = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCamelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
lowerCamelCase = tokenizer_r.from_pretrained(A )
lowerCamelCase = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
lowerCamelCase = tempfile.mkdtemp()
lowerCamelCase = tokenizer_r.save_pretrained(A , legacy_format=A )
lowerCamelCase = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
lowerCamelCase = tokenizer_r.from_pretrained(A )
lowerCamelCase = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
lowerCamelCase = tempfile.mkdtemp()
lowerCamelCase = tokenizer_r.save_pretrained(A , legacy_format=A )
lowerCamelCase = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase = tokenizer_r.from_pretrained(A )
lowerCamelCase = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@cached_property
def __A ( self ) -> Dict:
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(A , f.name )
lowerCamelCase = XLMRobertaTokenizer(f.name , keep_accents=A )
lowerCamelCase = pickle.dumps(A )
pickle.loads(A )
def __A ( self ) -> List[str]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = self.get_rust_tokenizer()
lowerCamelCase = """I was born in 92000, and this is falsé."""
lowerCamelCase = tokenizer.tokenize(A )
lowerCamelCase = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
lowerCamelCase = tokenizer.encode(A , add_special_tokens=A )
lowerCamelCase = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
lowerCamelCase = self.get_rust_tokenizer()
lowerCamelCase = tokenizer.encode(A )
lowerCamelCase = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = """Hello World!"""
lowerCamelCase = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A , self.big_tokenizer.encode(A ) )
@slow
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCamelCase = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A , self.big_tokenizer.encode(A ) )
@slow
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = {"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 457 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
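# Minimal usage sketch (assumes a trained 2D UNet and a compatible scheduler are
# already in hand; every name below is illustrative):
#   pipe = CustomLocalPipeline(unet=my_unet, scheduler=my_scheduler)
#   output, message = pipe(batch_size=1, num_inference_steps=50)
#   output.images[0].save("sample.png")  # message == "This is a local test"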
| 457 | 1 |
"""simple docstring"""
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube (assumes n >= 0)."""
    val = round(n ** (1 / 3))
    return val * val * val == n
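# Note: the naive float check `(n ** (1 / 3)) ** 3 == n` is unreliable because
# of floating-point error (e.g. 64 ** (1 / 3) evaluates to 3.9999999999999996
# in CPython), which is why the candidate root is rounded before cubing.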
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 703 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
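    # With the lazy module installed in sys.modules, importing this package stays
    # cheap: `from transformers import TimmBackboneConfig` loads no torch code,
    # while accessing `TimmBackbone` resolves through the torch-gated branch above
    # on first use (sketch of the behaviour, not an API guarantee).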
| 494 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True,
                 use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4,
                 ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1,
                 max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 57 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline using any `ModelWithLMHead`."""
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
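# Usage sketch (the checkpoint name is the stock example, not mandated by this
# file):
#   from transformers import pipeline
#   fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")
#   fill_mask("Paris is the [MASK] of France.", top_k=2)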
| 664 | 0 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
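# Quick self-check sketch (relies only on numpy, which cv2 already depends on):
#   import numpy as np
#   black = np.zeros((1, 1, 3), dtype=np.uint8)
#   assert (convert_to_negative(black)[0][0] == 255).all()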
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 460 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__snake_case: Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 1_28  # default number of calibration samples
def _lowerCAmelCase ( self , lowerCAmelCase_=None ):
'''simple docstring'''
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
a_ : str = calib_dataset if calib_dataset is not None else self.calib_dataset
a_ : Dict = self._remove_unused_columns(lowerCAmelCase_ , description="""Calibration""" )
return DataLoader(
lowerCAmelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowerCAmelCase_ , )
def _lowerCAmelCase ( self , lowerCAmelCase_=None ):
'''simple docstring'''
a_ : Union[str, Any] = self.train_dataset if calib_dataset is None else calib_dataset
a_ : int = self.get_calib_dataloader(lowerCAmelCase_ )
a_ : Dict = self.model
quant_trainer.configure_model(lowerCAmelCase_ , self.quant_trainer_args , calib=lowerCAmelCase_ )
model.eval()
quant_trainer.enable_calibration(lowerCAmelCase_ )
logger.info("""***** Running calibration *****""" )
logger.info(f''' Num examples = {self.calib_num}''' )
logger.info(f''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(lowerCAmelCase_ ):
# Prediction step
a_ , a_ , a_ : Optional[int] = self.prediction_step(lowerCAmelCase_ , lowerCAmelCase_ , prediction_loss_only=lowerCAmelCase_ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(lowerCAmelCase_ , self.quant_trainer_args )
a_ : Optional[int] = model
def _lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_ = "eval" ):
'''simple docstring'''
a_ : str = self.eval_dataset if eval_dataset is None else eval_dataset
a_ : List[str] = self.get_eval_dataloader(lowerCAmelCase_ )
a_ : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a_ : List[str] = self.compute_metrics
a_ : Tuple = None
a_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
a_ : List[str] = eval_loop(
lowerCAmelCase_ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , )
finally:
a_ : int = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
a_ : Optional[Any] = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , output.predictions )
a_ : Tuple = self.compute_metrics(lowerCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
a_ : Optional[int] = metrics.pop(lowerCAmelCase_ )
self.log(lowerCAmelCase_ )
else:
a_ : Any = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a_ : Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCAmelCase_ )
return metrics
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_ = "test" ):
'''simple docstring'''
a_ : List[Any] = self.get_test_dataloader(lowerCAmelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
a_ : Any = self.compute_metrics
a_ : Dict = None
a_ : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
a_ : Dict = eval_loop(
lowerCAmelCase_ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , )
finally:
a_ : int = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
a_ : Tuple = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , output.predictions , """predict""" )
a_ : List[Any] = self.compute_metrics(lowerCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
a_ : Tuple = metrics.pop(lowerCAmelCase_ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCAmelCase_ )
def _lowerCAmelCase ( self , lowerCAmelCase_="./" ):
'''simple docstring'''
a_ : Any = self.eval_dataset
a_ : Tuple = self.get_eval_dataloader(lowerCAmelCase_ )
a_ : List[str] = next(iter(lowerCAmelCase_ ) )
# saving device - to make it consistent
a_ : Dict = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
a_ : Optional[int] = tuple(v.to(lowerCAmelCase_ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
a_ : List[str] = True
a_ : str = self.model.to(lowerCAmelCase_ )
model.eval()
model.float()
a_ : Optional[int] = model.module if hasattr(lowerCAmelCase_ , """module""" ) else model
quant_trainer.configure_model(lowerCAmelCase_ , self.quant_trainer_args )
a_ : Union[str, Any] = os.path.join(lowerCAmelCase_ , """model.onnx""" )
logger.info(f'''exporting model to {output_model_file}''' )
a_ : Dict = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , export_params=lowerCAmelCase_ , opset_version=13 , do_constant_folding=lowerCAmelCase_ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=lowerCAmelCase_ , )
logger.info("""onnx export finished""" )
| 460 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True,
        frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
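# Patch-grid arithmetic implied by the defaults above (the formula used by the
# AST embedding layer; stated here for illustration):
#   frequency patches: (num_mel_bins - patch_size) // frequency_stride + 1 = (128 - 16) // 10 + 1 = 12
#   time patches:      (max_length - patch_size) // time_stride + 1 = (1024 - 16) // 10 + 1 = 101
# i.e. 12 * 101 = 1212 patches per spectrogram.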
| 450 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True,
                 keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>",
                 sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 450 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=3_6_0_0_0, max_position_embeddings=1_2_8_0, d_model=1_0_2_4, d_ff=8_1_9_2, d_ext=4_0_9_6,
        d_spout=1_2_8, num_switch_layers=1_0, num_ext_layers=0, num_heads=1_6, num_experts=1_6,
        expert_capacity=1_2_8, dropout_rate=0.0, layer_norm_epsilon=1E-5, router_bias=False,
        router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False,
        output_hidden_states=False, output_attentions=False, initializer_factor=0.002,
        output_router_logits=False, use_cache=True, separator_token_id=3_5_9_9_8,
        pad_token_id=3_5_9_9_5, eos_token_id=3_5_9_9_9, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs,
        )
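# Tiny illustrative instantiation (argument values are arbitrary, chosen only
# to show the derived attribute):
#   config = GPTSanJapaneseConfig(num_switch_layers=2, num_ext_layers=1)
#   config.num_layers  # -> 3 (switch layers plus extension layers)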
| 586 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_0_0_0)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_0)
def snake_case__ ( self : Any )-> Tuple:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = 'I was born in 92000, and this is falsé.'
A__ = tokenizer.tokenize(lowercase_ )
A__ = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(lowercase_ )
A__ = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
def snake_case__ ( self : str,lowercase_ : int=1_5 )-> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = self.rust_tokenizer_class.from_pretrained(lowercase_,**lowercase_ )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' )
# Simple input
self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' )
# Simple input
self.assertRaises(
lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',)
# Pair input
self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' )
# Pair input
self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' )
# Pair input
self.assertRaises(
lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',)
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
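

# Hedged usage sketch (added for illustration, not part of the test suite): a
# minimal encode/decode round trip with the pretrained tokenizer exercised above.
def _reformer_tokenizer_demo():
    tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tokenizer.encode("Hello World!")
    print(ids)  # expected per the test above: [126, 32, 262, 152, 38, 72, 287]
    print(tokenizer.decode(ids))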
| 586 | 1 |
"""
Project Euler Problem 174 (https://projecteuler.net/problem=174): count the
tile amounts t <= t_limit for which between 1 and n_limit distinct square
laminae can be built from exactly t tiles.
"""
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
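

# Hedged sanity check (added for illustration, not part of the original
# solution): a brute-force recount of square laminae for a small tile budget,
# used to cross-check the counting logic in solution().
def brute_force_solution(t_limit: int = 1_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer in range(3, t_limit):
        if 4 * outer - 4 > t_limit:  # even the thinnest lamina no longer fits
            break
        for hole in range(outer - 2, 0, -2):
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    assert solution(1_000) == brute_force_solution(1_000)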
| 42 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCAmelCase_ : int = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Dict , *lowercase_ : List[str] , **lowercase_ : Optional[int]):
'''simple docstring'''
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
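

# Hedged usage note (added for illustration, not part of the original module):
# constructing the deprecated class emits a FutureWarning; new code should use
# SegformerImageProcessor directly. A minimal check, assuming default arguments
# are acceptable:
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         SegformerFeatureExtractor()
#         assert any(issubclass(w.category, FutureWarning) for w in caught)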
| 512 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor


class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problem."""
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
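

# Hedged usage sketch (added for illustration, not part of the test suite):
# single-image classification with the distilled DeiT checkpoint used above.
def _deit_classification_demo(image):
    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]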
| 700 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_ )
class a__ ( UpperCamelCase_ ):
def __init__( self : int ,*a__ : Optional[Any] ,**a__ : Union[str, Any]) -> Tuple:
"""simple docstring"""
super().__init__(*a__ ,**a__)
requires_backends(self ,'''vision''')
self.check_model_type(a__)
def __call__( self : str ,a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] ,**a__ : List[str]) -> Optional[int]:
"""simple docstring"""
return super().__call__(a__ ,**a__)
def __UpperCamelCase ( self : Union[str, Any] ,**a__ : List[Any]) -> Any:
"""simple docstring"""
return {}, {}, {}
def __UpperCamelCase ( self : Tuple ,a__ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:List[str] = load_image(a__)
_lowerCAmelCase:int = image.size
_lowerCAmelCase:int = self.image_processor(images=a__ ,return_tensors=self.framework)
return model_inputs
def __UpperCamelCase ( self : Dict ,a__ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:Any = self.model(**a__)
return model_outputs
def __UpperCamelCase ( self : List[Any] ,a__ : Dict) -> Any:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = model_outputs.predicted_depth
_lowerCAmelCase:Union[str, Any] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1) ,size=self.image_size[::-1] ,mode='''bicubic''' ,align_corners=a__)
_lowerCAmelCase:List[str] = prediction.squeeze().cpu().numpy()
_lowerCAmelCase:Any = (output * 255 / np.max(a__)).astype('''uint8''')
_lowerCAmelCase:Dict = Image.fromarray(a__)
_lowerCAmelCase:Tuple = {}
_lowerCAmelCase:Optional[int] = predicted_depth
_lowerCAmelCase:str = depth
return output_dict
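

# Hedged usage sketch (added for illustration, not part of the original module);
# the checkpoint name and image URL below are assumptions:
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     outputs["depth"].save("depth.png")  # PIL image; outputs["predicted_depth"] is the raw tensor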
| 439 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_training(self):
        pass


def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
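

# Hedged usage sketch (added for illustration, not part of the test suite):
# video classification with the fine-tuned checkpoint used above; ``video`` is
# a list of per-frame numpy arrays, as returned by prepare_video().
def _videomae_classification_demo(video):
    processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
    model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
    inputs = processor(video, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]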
| 53 |
"""Strand sort: repeatedly pull an increasing "strand" out of the input and merge it into the solution."""
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
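
    # Hedged usage sketch (added for illustration): strand_sort mutates the list
    # it is given, so pass a copy when the caller still needs the original order.
    data = [7, 2, 9, 2, 5]
    assert strand_sort(list(data)) == [2, 2, 5, 7, 9]
    assert strand_sort(list(data), reverse=True) == [9, 7, 5, 2, 2]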
| 50 | 0 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Any = VQModel
_snake_case : List[Any] = 'sample'
@property
def A ( self : List[str] , A_ : str=(32, 32) )-> int:
__UpperCamelCase = 4
__UpperCamelCase = 3
__UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(A_ )
return {"sample": image}
@property
def A ( self : str )-> Dict:
return (3, 32, 32)
@property
def A ( self : str )-> str:
return (3, 32, 32)
def A ( self : Optional[Any] )-> List[Any]:
__UpperCamelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
__UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
def A ( self : Union[str, Any] )-> Dict:
pass
def A ( self : Optional[Any] )-> str:
pass
def A ( self : int )-> Dict:
__UpperCamelCase , __UpperCamelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(A_ )
__UpperCamelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A ( self : List[Any] )-> Union[str, Any]:
__UpperCamelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(A_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
__UpperCamelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
__UpperCamelCase = image.to(A_ )
with torch.no_grad():
__UpperCamelCase = model(A_ ).sample
__UpperCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__UpperCamelCase = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) ) | 228 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure) | 228 | 1 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
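

# Hedged usage sketch (added for illustration; the file path is an assumption):
#
#     data_args = DataTrainingArguments(train_data_file="./data/train.txt", line_by_line=True, block_size=128)
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = get_dataset(data_args, tokenizer=tokenizer)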


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
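

# Hedged invocation sketch (added for illustration; paths and model name are assumptions):
#
#     python run_language_modeling.py \
#         --model_name_or_path gpt2 \
#         --train_data_file ./data/train.txt \
#         --eval_data_file ./data/eval.txt \
#         --do_train --do_eval \
#         --output_dir ./lm-finetuned --overwrite_output_dir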
| 657 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
@require_tokenizers
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCamelCase = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(__magic_name__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(__magic_name__ , __magic_name__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __magic_name__ )
else:
self.assertEqual(tokenizer.do_lower_case , __magic_name__ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__magic_name__ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
UpperCamelCase = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = TOKENIZER_MAPPING.values()
UpperCamelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__magic_name__ )
@require_tokenizers
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__magic_name__ ) , __magic_name__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , __magic_name__ )
@require_tokenizers
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=__magic_name__ )
UpperCamelCase = """Hello, world. How are you?"""
UpperCamelCase = tokenizer.tokenize(__magic_name__ )
self.assertEqual("""[UNK]""" , tokens[0] )
UpperCamelCase = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=__magic_name__ )
UpperCamelCase = tokenizer.tokenize(__magic_name__ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(__magic_name__ ) , __magic_name__ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = get_tokenizer_config("""bert-base-cased""" )
UpperCamelCase = config.pop("""_commit_hash""" , __magic_name__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__magic_name__ , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCamelCase = get_tokenizer_config(__magic_name__ )
self.assertDictEqual(__magic_name__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = get_tokenizer_config(__magic_name__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __magic_name__ )
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__magic_name__ ):
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
UpperCamelCase = CustomTokenizer.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __magic_name__ )
# Can register in two steps
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__magic_name__ , fast_tokenizer_class=__magic_name__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__magic_name__ , slow_tokenizer_class=__magic_name__ , fast_tokenizer_class=__magic_name__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__magic_name__ ):
AutoTokenizer.register(__magic_name__ , fast_tokenizer_class=__magic_name__ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = BertTokenizerFast.from_pretrained(__magic_name__ )
bert_tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , use_fast=__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaises(__magic_name__ ):
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__magic_name__ ):
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , trust_remote_code=__magic_name__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
class UpperCAmelCase ( __snake_case ):
lowercase = False
class UpperCAmelCase ( __snake_case ):
lowercase = NewTokenizer
lowercase = False
try:
AutoConfig.register("""custom""" , __magic_name__ )
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
AutoTokenizer.register(__magic_name__ , fast_tokenizer_class=__magic_name__ )
# If remote code is not set, the default is to use local
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
with self.assertRaisesRegex(
__magic_name__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase = AutoTokenizer.from_pretrained("""bert-base""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
__magic_name__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , revision="""aaaaaa""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
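
# A minimal, self-contained sketch of the registration flow the tests above exercise.
# `MyConfig` and `MyTokenizer` are hypothetical names used only for illustration; the
# register() calls themselves are the public transformers API used in the tests.
#
#     from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig
#
#     class MyConfig(PretrainedConfig):
#         model_type = "my-model"  # must be unique among registered model types
#
#     class MyTokenizer(BertTokenizer):
#         pass
#
#     AutoConfig.register("my-model", MyConfig)
#     AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
#     # From here on, AutoTokenizer.from_pretrained on a checkpoint whose config is a
#     # MyConfig resolves to MyTokenizer, exactly as asserted in the tests above.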
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__snake_case : Optional[int] = logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Optional[Dict[str, int]] = None , _SCREAMING_SNAKE_CASE: PILImageResampling = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Union[int, float] = 1 / 255 , _SCREAMING_SNAKE_CASE: Dict[str, int] = None , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE: Optional[Union[float, List[float]]] = None , **_SCREAMING_SNAKE_CASE: Tuple , ) -> None:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = size if size is not None else {"height": 224, "width": 224}
__lowerCAmelCase : int = get_size_dict(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = crop_size if crop_size is not None else {"height": 224, "width": 224}
__lowerCAmelCase : Tuple = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE , param_name="crop_size")
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : List[Any] = do_rescale
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : Optional[Any] = do_center_crop
__lowerCAmelCase : str = crop_size
__lowerCAmelCase : List[str] = size
__lowerCAmelCase : List[Any] = resample
__lowerCAmelCase : Tuple = rescale_factor
__lowerCAmelCase : List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: Dict[str, int] , _SCREAMING_SNAKE_CASE: PILImageResampling = PILImageResampling.BILINEAR , _SCREAMING_SNAKE_CASE: Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE: List[str] , ) -> np.ndarray:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE)
if "shortest_edge" in size:
__lowerCAmelCase : List[Any] = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size["shortest_edge"] , default_to_square=_SCREAMING_SNAKE_CASE)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__lowerCAmelCase : Optional[int] = (size["height"], size["width"])
else:
raise ValueError(F"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""")
return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: Dict[str, int] , _SCREAMING_SNAKE_CASE: Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE: List[str] , ) -> np.ndarray:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
return center_crop(_SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: float , _SCREAMING_SNAKE_CASE: Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE: int) -> np.ndarray:
"""simple docstring"""
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: Union[float, List[float]] , _SCREAMING_SNAKE_CASE: Union[float, List[float]] , _SCREAMING_SNAKE_CASE: Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE: int , ) -> np.ndarray:
"""simple docstring"""
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: ImageInput , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Dict[str, int] = None , _SCREAMING_SNAKE_CASE: PILImageResampling = None , _SCREAMING_SNAKE_CASE: bool = None , _SCREAMING_SNAKE_CASE: int = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[float] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE: Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE: Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE: Union[str, ChannelDimension] = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE: Any , ) -> BatchFeature:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="crop_size" , default_to_square=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = resample if resample is not None else self.resample
__lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase : Any = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
__lowerCAmelCase : Optional[int] = size if size is not None else self.size
__lowerCAmelCase : Any = get_size_dict(_SCREAMING_SNAKE_CASE)
if not is_batched(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Dict = [images]
if not valid_images(_SCREAMING_SNAKE_CASE):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
# All transformations expect numpy arrays.
__lowerCAmelCase : Tuple = [to_numpy_array(_SCREAMING_SNAKE_CASE) for image in images]
if do_resize:
__lowerCAmelCase : Tuple = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE) for image in images]
if do_center_crop:
__lowerCAmelCase : int = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE) for image in images]
if do_rescale:
__lowerCAmelCase : Dict = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE) for image in images]
if do_normalize:
__lowerCAmelCase : str = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE) for image in images]
__lowerCAmelCase : List[str] = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) for image in images]
__lowerCAmelCase : int = {"pixel_values": images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE) | 615 |
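
# A short usage sketch for the processor defined above. This is illustrative only: the
# relative imports mean the class is normally imported from the library rather than run
# as a script, and the dummy PIL image stands in for real data.
#
#     from PIL import Image
#
#     dummy = Image.new("RGB", (640, 480), color=(128, 64, 32))
#     processor = CustomImageProcessor()
#     batch = processor(images=dummy, return_tensors="np")
#     # (1, 3, 224, 224): resized to the configured 224x224, centre-cropped,
#     # rescaled to [0, 1] and normalised with the ImageNet mean/std.
#     print(batch["pixel_values"].shape)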
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Any=0) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : str = np.random.RandomState(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = self.get_dummy_inputs()
__lowerCAmelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : List[str] = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = self.get_dummy_inputs()
__lowerCAmelCase : str = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : str = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.get_dummy_inputs()
__lowerCAmelCase : Tuple = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : Any = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.get_dummy_inputs()
__lowerCAmelCase : Tuple = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : Tuple = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = self.get_dummy_inputs()
__lowerCAmelCase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : List[Any] = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.get_dummy_inputs()
__lowerCAmelCase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : Optional[Any] = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Any) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.get_dummy_inputs()
__lowerCAmelCase : List[str] = 3 * [inputs["prompt"]]
# forward
__lowerCAmelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = output.images[0, -3:, -3:, -1]
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs()
__lowerCAmelCase : Union[str, Any] = 3 * [inputs.pop("prompt")]
__lowerCAmelCase : Union[str, Any] = pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np" , )
__lowerCAmelCase : Dict = text_inputs["input_ids"]
__lowerCAmelCase : str = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
__lowerCAmelCase : Union[str, Any] = prompt_embeds
# forward
__lowerCAmelCase : Tuple = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
__lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = self.get_dummy_inputs()
__lowerCAmelCase : Optional[int] = 3 * ["this is a negative prompt"]
__lowerCAmelCase : Union[str, Any] = negative_prompt
__lowerCAmelCase : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
__lowerCAmelCase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = output.images[0, -3:, -3:, -1]
__lowerCAmelCase : Any = self.get_dummy_inputs()
__lowerCAmelCase : List[Any] = 3 * [inputs.pop("prompt")]
__lowerCAmelCase : Dict = []
for p in [prompt, negative_prompt]:
__lowerCAmelCase : Optional[Any] = pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np" , )
__lowerCAmelCase : Any = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
__lowerCAmelCase , __lowerCAmelCase : List[str] = embeds
# forward
__lowerCAmelCase : int = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> int:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : str = ort.SessionOptions()
__lowerCAmelCase : List[str] = False
return options
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = "A painting of a squirrel eating a burger"
np.random.seed(0)
__lowerCAmelCase : str = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np")
__lowerCAmelCase : Union[str, Any] = output.images
__lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCAmelCase : Dict = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self: str) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx")
__lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = "open neural network exchange"
__lowerCAmelCase : Union[str, Any] = np.random.RandomState(0)
__lowerCAmelCase : List[str] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type="np")
__lowerCAmelCase : Tuple = output.images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCAmelCase : Optional[Any] = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> str:
"""simple docstring"""
__lowerCAmelCase : Tuple = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx")
__lowerCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = "open neural network exchange"
__lowerCAmelCase : Any = np.random.RandomState(0)
__lowerCAmelCase : int = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type="np")
__lowerCAmelCase : Optional[Any] = output.images
__lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCAmelCase : List[Any] = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : str = 0
def test_callback_fn(_SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: np.ndarray) -> None:
__lowerCAmelCase : Optional[int] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowerCAmelCase : Optional[int] = latents[0, -3:, -3:, -1]
__lowerCAmelCase : List[str] = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowerCAmelCase : Tuple = latents[0, -3:, -3:, -1]
__lowerCAmelCase : Any = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = "Andromeda galaxy in a bottle"
__lowerCAmelCase : Any = np.random.RandomState(0)
pipe(
prompt=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , guidance_scale=7.5 , generator=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _SCREAMING_SNAKE_CASE ( self: str) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
assert pipe.safety_checker is None
__lowerCAmelCase : Optional[Any] = pipe("example prompt" , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowerCAmelCase : Optional[Any] = pipe("example prompt" , num_inference_steps=2).images[0]
assert image is not None | 615 | 1 |
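
# Sketch of the inference pattern the fast tests above rely on. It assumes diffusers is
# installed with its ONNX Runtime extra; the tiny test checkpoint keeps it CPU-friendly.
#
#     import numpy as np
#     from diffusers import OnnxStableDiffusionPipeline
#
#     pipe = OnnxStableDiffusionPipeline.from_pretrained(
#         "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
#         provider="CPUExecutionProvider",
#     )
#     out = pipe(
#         prompt="A painting of a squirrel eating a burger",
#         generator=np.random.RandomState(0),
#         num_inference_steps=2,
#         output_type="np",
#     )
#     print(out.images.shape)  # (1, 128, 128, 3) for this tiny checkpoint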
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
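
# Standalone sketch of the masked-LM check above (downloads the small public ESM-2
# checkpoint; shapes follow the assertions in the integration test).
#
#     import tensorflow as tf
#     from transformers import TFEsmForMaskedLM
#
#     model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#     print(logits.shape)  # (1, 6, 33): batch, sequence length, ESM-2 vocabulary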
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
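
# The module above is a standard lazy-import stub: `_import_structure` maps submodule
# names to their public symbols, and `_LazyModule` defers the real imports until an
# attribute is first accessed. A minimal, self-contained illustration of the same idea,
# independent of transformers' helper (a sketch, not the library's exact implementation):
#
#     import importlib
#     import types
#
#     class SketchLazyModule(types.ModuleType):
#         def __init__(self, name: str, import_structure: dict):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr: str):
#             for submodule, symbols in self._import_structure.items():
#                 if attr in symbols:
#                     module = importlib.import_module(f"{self.__name__}.{submodule}")
#                     value = getattr(module, attr)
#                     setattr(self, attr, value)  # cache for subsequent lookups
#                     return value
#             raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")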
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __magic_name__ ( _UpperCamelCase ):
def _lowerCamelCase ( self , __magic_name__ ):
"""simple docstring"""
return 0.0
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = min([-2_0, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_lowerCAmelCase = max([2_0, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 5_1_2
_lowerCAmelCase = [1] + [0] * (size - 1)
_lowerCAmelCase = [filter_type.process(__lowerCamelCase ) for item in inputs]
_lowerCAmelCase = [0] * (samplerate - size) # zero-padding
outputs += filler
_lowerCAmelCase = np.abs(np.fft.fft(__lowerCamelCase ) )
_lowerCAmelCase = 2_0 * np.logaa(__lowerCamelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(2_4, samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
# Display within reasonable bounds
_lowerCAmelCase = get_bounds(__lowerCamelCase, __lowerCamelCase )
plt.ylim(max([-8_0, bounds[0]] ), min([8_0, bounds[1]] ) )
plt.ylabel('Gain (dB)' )
plt.plot(__lowerCamelCase )
plt.show()
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 5_1_2
_lowerCAmelCase = [1] + [0] * (size - 1)
_lowerCAmelCase = [filter_type.process(__lowerCamelCase ) for item in inputs]
_lowerCAmelCase = [0] * (samplerate - size) # zero-padding
outputs += filler
_lowerCAmelCase = np.angle(np.fft.fft(__lowerCamelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(2_4, samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
plt.ylim(-2 * pi, 2 * pi )
plt.ylabel('Phase shift (Radians)' )
plt.plot(np.unwrap(__lowerCamelCase, -2 * pi ) )
plt.show()
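
# Quick usage sketch: any object with a `process(sample) -> float` method satisfies the
# FilterType protocol, so even this identity "filter" can be plotted (its impulse
# response is a unit impulse, giving a flat 0 dB gain across the band).
if __name__ == "__main__":

    class IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(IdentityFilter(), samplerate=48000)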
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=1_0 , __magic_name__=3 , __magic_name__=2 , __magic_name__=2 , __magic_name__=True , __magic_name__=True , __magic_name__=3_2 , __magic_name__=5 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0 , __magic_name__=0.02 , __magic_name__="divided_space_time" , __magic_name__=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_frames
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = attention_type
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scope
_lowerCAmelCase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = (num_frames) * self.num_patches_per_frame + 1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
_lowerCAmelCase = self.num_labels
return config
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = TimesformerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = TimesformerForVideoClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ )
# verify the logits shape
_lowerCAmelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : Optional[Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase : int = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Dict = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Any = False
UpperCamelCase : Tuple = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = TimesformerModelTester(self )
_lowerCAmelCase = ConfigTester(
self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=3_7 )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
"""simple docstring"""
_lowerCAmelCase = copy.deepcopy(__magic_name__ )
if return_labels:
if model_class in get_values(__magic_name__ ):
_lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__magic_name__ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TimesformerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 309 | 1 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" MaskFormer. Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
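

# A minimal usage sketch, assuming only the class defined above; the argument
# values are illustrative, not recommended settings:
if __name__ == "__main__":
    config = MaskFormerConfig.from_backbone_and_decoder_configs(
        backbone_config=SwinConfig(image_size=384, embed_dim=128),
        decoder_config=DetrConfig(),
    )
    # to_dict() serializes the nested backbone/decoder configs as plain dicts
    assert config.to_dict()["backbone_config"]["model_type"] == "swin"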
| 406 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
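

# For intuition, a stripped-down sketch of the lazy-import pattern used above
# (the real _LazyModule also handles __dir__, pickling, and error reporting):
# attribute lookups resolve the owning submodule, import it, and cache the value.
import importlib
from types import ModuleType


class _LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value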
| 660 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
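

# A minimal concrete subcommand sketch (EnvCommand is a hypothetical name; the
# parser passed in is assumed to be an argparse sub-parsers action, as in the
# real CLI): register_subcommand wires the parser, run() does the actual work.
class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        env_parser = parser.add_parser("env", help="Print environment information.")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        print("environment info goes here")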
| 148 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
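

# A small extension sketch (kmp_all_matches is a hypothetical helper, not part
# of the original module): reuse the same failure array to collect every match
# start instead of stopping at the first one, e.g. ("aba", "ababa") -> [0, 2].
def kmp_all_matches(pattern: str, text: str) -> list[int]:
    failure = get_failure_array(pattern)
    matches = []
    j = 0
    for i, char in enumerate(text):
        while j > 0 and pattern[j] != char:
            j = failure[j - 1]
        if pattern[j] == char:
            j += 1
        if j == len(pattern):
            matches.append(i - len(pattern) + 1)
            j = failure[j - 1]
    return matches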
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 148 | 1 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
    default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
    default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
    default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
    default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for preprocessing the dataset.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
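
# Note on the pattern above: the host-to-device copies, the kernel launch, and
# the device-to-host copies are all enqueued asynchronously on one CUDA stream,
# so none of them is guaranteed complete until stream.synchronize(); the timed
# window therefore covers the whole enqueue-and-execute sequence, not just the
# TensorRT kernel itself.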
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 192 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_sentiment_classification(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 192 | 1 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """
    >>> factorial(7)
    5040
    >>> factorial(-1)
    Traceback (most recent call last):
        ...
    ValueError: Number should not be negative.
    """
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
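

# The cached recursive version above still recurses O(num) deep, so very large
# inputs can hit Python's recursion limit; a minimal iterative sketch with the
# same contract (factorial_iterative is a hypothetical helper, not part of the
# original module):
def factorial_iterative(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result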
| 538 | """simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 538 | 1 |
def solution(limit: int = 1000000) -> int:
    # With x, y, z in arithmetic progression, write y = a and d as the common
    # difference, so x = a + d and z = a - d. Then
    # n = x**2 - y**2 - z**2 = a * (4 * d - a), so for each divisor a of n we
    # need d = (a + n / a) / 4 to be a positive integer with a > d and a < 4 * d.
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also a < 4 * d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54 |
"""simple docstring"""
def solution(n: int = 600_851_475_143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 584 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 617 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 617 | 1 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
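# Quick sanity check for the two helpers above; the 4x4 input is a
# hypothetical example of mine, not part of the original file:
#
#   >>> maxpooling(np.arange(1, 17).reshape(4, 4), size=2, stride=2)
#   array([[ 6.,  8.],
#          [14., 16.]])
#   >>> avgpooling(np.arange(1, 17).reshape(4, 4), size=2, stride=2)
#   array([[ 3.,  5.],
#          [11., 13.]])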
| 222 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Directed, weighted graph stored as an adjacency dict: node -> [[w, v], ...]."""

    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leave it (or pass -1) for a random
    # count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    """Undirected, weighted graph stored as an adjacency dict: node -> [[w, v], ...]."""

    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leave it (or pass -1) for a random
    # count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
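# Minimal usage sketch for the two classes above (node numbers are mine):
#
#   g = DirectedGraph()
#   g.add_pair(0, 1)
#   g.add_pair(1, 2)
#   g.add_pair(0, 2)
#   g.dfs(0, 2)             # a visit order ending at 2, e.g. [0, 1, 2]
#   g.has_cycle()           # False for this acyclic example
#   Graph().add_pair(0, 1)  # the undirected variant stores the edge both ways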
| 222 | 1 |
"""simple docstring"""
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
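# With the edge dict above this prints ['c', 'd', 'e', 'b', 'a']: each vertex
# appears only after everything reachable from it (a reverse topological order).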
| 715 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
def __init__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : int=13 , lowercase_ : Optional[int]=64 , lowercase_ : Any=3 , lowercase_ : Any=[16, 48, 96] , lowercase_ : List[Any]=[1, 3, 6] , lowercase_ : Union[str, Any]=[1, 2, 10] , lowercase_ : Optional[Any]=[7, 3, 3] , lowercase_ : Union[str, Any]=[4, 2, 2] , lowercase_ : Tuple=[2, 1, 1] , lowercase_ : List[str]=[2, 2, 2] , lowercase_ : Union[str, Any]=[False, False, True] , lowercase_ : Optional[int]=[0.0, 0.0, 0.0] , lowercase_ : str=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Optional[int]=True , lowercase_ : Optional[int]=True , lowercase_ : Optional[Any]=2 , ):
snake_case_ : List[Any] = parent
snake_case_ : int = batch_size
snake_case_ : Union[str, Any] = image_size
snake_case_ : Tuple = patch_sizes
snake_case_ : List[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Any = is_training
snake_case_ : Any = use_labels
snake_case_ : str = num_labels
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : int = num_heads
snake_case_ : List[str] = stride_kv
snake_case_ : Any = depth
snake_case_ : Dict = cls_token
snake_case_ : Dict = attention_drop_rate
snake_case_ : int = initializer_range
snake_case_ : Tuple = layer_norm_eps
def _snake_case ( self : Dict ):
snake_case_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : str = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ : Dict = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : int ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _snake_case ( self : int , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ):
snake_case_ : Tuple = TFCvtModel(config=lowercase_ )
snake_case_ : Tuple = model(lowercase_ , training=lowercase_ )
snake_case_ : int = (self.image_size, self.image_size)
snake_case_, snake_case_ : List[str] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case_ : str = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case_ : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _snake_case ( self : Dict , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str] ):
snake_case_ : int = self.num_labels
snake_case_ : Any = TFCvtForImageClassification(lowercase_ )
snake_case_ : List[Any] = model(lowercase_ , labels=lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Any ):
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ : List[str] = config_and_inputs
snake_case_ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Optional[int] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
_lowerCAmelCase : str = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
_lowerCAmelCase : str = False
_lowerCAmelCase : int = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : int = False
_lowerCAmelCase : int = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def _snake_case ( self : int ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def _snake_case ( self : Any ):
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def _snake_case ( self : str ):
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def _snake_case ( self : Union[str, Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def _snake_case ( self : Tuple ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def _snake_case ( self : Tuple ):
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def _snake_case ( self : List[str] ):
snake_case_ : Optional[Any] = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(lowercase_ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def _snake_case ( self : int ):
snake_case_, snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Dict = model_class(lowercase_ )
snake_case_ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : List[str] = [*signature.parameters.keys()]
snake_case_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def _snake_case ( self : List[str] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : str ):
snake_case_ : Any = model_class(lowercase_ )
snake_case_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
snake_case_ : Tuple = outputs.hidden_states
snake_case_ : str = len(self.model_tester.depth )
self.assertEqual(len(lowercase_ ) , lowercase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Dict = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : Tuple = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def _snake_case ( self : str ):
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self : List[Any] ):
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def _snake_case ( self : Optional[Any] ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : int = TFCvtModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __lowercase ( ):
snake_case_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase):
@cached_property
def _snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _snake_case ( self : Tuple ):
snake_case_ : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case_ : Any = self.default_image_processor
snake_case_ : Union[str, Any] = prepare_img()
snake_case_ : int = image_processor(images=lowercase_ , return_tensors='''tf''' )
# forward pass
snake_case_ : Tuple = model(**lowercase_ )
# verify the logits
snake_case_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
snake_case_ : Tuple = tf.constant([0.92_85, 0.90_15, -0.31_50] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase_ , atol=1E-4 ) )
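# Worked instance of the per-stage size rule used in the model check above
# (numbers are mine): height 64, patch size 7, stride 4, padding 2 gives
# floor((64 + 2*2 - 7) / 4) + 1 = 16.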
| 485 | 0 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ) -> str:
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
__UpperCamelCase = getattr(lowercase , pre_tok_state.pop("""type""" ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**lowercase )
__UpperCamelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__UpperCamelCase = """post_processor"""
__UpperCamelCase = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
__UpperCamelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__UpperCamelCase = tuple(state["""sep"""] )
if "cls" in state:
__UpperCamelCase = tuple(state["""cls"""] )
__UpperCamelCase = False
if state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
__UpperCamelCase = add_prefix_space
__UpperCamelCase = True
if state.get("""trim_offsets""" , lowercase ) != trim_offsets:
__UpperCamelCase = trim_offsets
__UpperCamelCase = True
if changes_to_apply:
__UpperCamelCase = getattr(lowercase , state.pop("""type""" ) )
__UpperCamelCase = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , lowercase ) -> Dict:
__UpperCamelCase = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
__UpperCamelCase = value
def __lowerCamelCase ( self , *lowercase , **lowercase ) -> BatchEncoding:
__UpperCamelCase = kwargs.get("""is_split_into_words""" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*lowercase , **lowercase )
def __lowerCamelCase ( self , *lowercase , **lowercase ) -> BatchEncoding:
__UpperCamelCase = kwargs.get("""is_split_into_words""" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def __lowerCamelCase ( self , lowercase , lowercase=None ) -> List[str]:
__UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict:
__UpperCamelCase = super()._pad(
encoded_inputs=lowercase , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
# Load from model defaults
if return_attention_mask is None:
__UpperCamelCase = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__UpperCamelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowercase )
if needs_to_be_padded:
__UpperCamelCase = len(lowercase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__UpperCamelCase = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__UpperCamelCase = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
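# Concrete effect of the padding branch above (my example): with
# padding_side == 'right' and two positions of padding, a
# global_attention_mask of [0, 1, 0] becomes [0, 1, 0, -1, -1], since -1
# marks padded slots while 0 means local attention.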
| 601 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(cls , ["""torch""", """torchsde"""] )
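# Hedged usage note (mine): touching this stub without torch/torchsde
# installed makes requires_backends raise an ImportError that names the
# missing backends, instead of failing later with an opaque error.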
| 601 | 1 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), 'Tatoeba directory does not exist.')
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
self.resolver.convert_models(["heb-eng"] )
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 193 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 193 | 1 |
"""simple docstring"""
def neville_interpolate(x_points, y_points, xa):
    """Interpolate and evaluate a polynomial at xa using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
    doctest.testmod()
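# Worked check (my example, not from the original file): the points
# (1, 1), (2, 4), (3, 9), (4, 16) lie on y = x**2, so Neville's scheme
# reproduces the parabola exactly:
#   >>> neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)[0]
#   25.0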
| 19 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, we want the dataset to yield a random number of samples
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case :List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case :Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case :List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
__snake_case :Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[str] = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
__snake_case :Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case :Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case :str = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[Any] = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case :Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case :int = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case :Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case :Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :int = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case :Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :int = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case :List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[str] = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case :List[str] = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
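# One-line gloss of the sharding checked above (mine): with 24 samples,
# batch_size=3 and 2 processes, the global batches [0,1,2], [3,4,5], ... are
# dealt out alternately, so shard 0 sees batches 0, 2, 4, 6 and shard 1 sees
# batches 1, 3, 5, 7.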
| 455 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
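# Hypothetical usage sketch for the helpers above (class and key names mine):
#
#   @register
#   class Menu:
#       @mark("j")
#       def move_down(cls): ...
#
#       @mark_multiple("k", "K")
#       def move_up(cls): ...
#
#   Menu.handle_input() then dispatches the next keypress to the matching method.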
| 718 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
a : Tuple = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , **lowerCAmelCase__ , ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
a : Optional[int] = (
AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ , normalized=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
else mask_token
)
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : Dict = do_lower_case
a : Any = remove_space
a : Optional[Any] = keep_accents
a : List[str] = vocab_file
a : Optional[Any] = False if not self.vocab_file else True
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a : Dict = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
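# Layout produced by the two builders above for a sentence pair (my gloss):
#   tokens:   [CLS] a1 ... an [SEP] b1 ... bm [SEP]
#   type ids:   0   0  ...  0   0    1  ...  1   1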
| 31 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_00_00_00, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 1_28, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 1_42, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 1_28,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 1_42,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
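
# Hedged sketch (my addition): the recursive, dot-delimited flattening that
# test_flatten_dict above exercises. The real transformers.utils.flatten_dict
# has more options; this minimal version only covers nested plain dicts.
def _flatten_dict_sketch(d, parent_key="", delimiter="."):
    items = {}
    for key, value in d.items():
        new_key = f"{parent_key}{delimiter}{key}" if parent_key else str(key)
        if isinstance(value, dict):
            items.update(_flatten_dict_sketch(value, new_key, delimiter))
        else:
            items[new_key] = value
    return items


assert _flatten_dict_sketch({"a": {"b": 1, "c": {"d": 2}}}) == {"a.b": 1, "a.c.d": 2}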
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCAmelCase : Any ={"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. it includes the space before it,
        # so we set lstrip to True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : "Conversation") ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text)
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase)
_lowerCamelCase : Dict = """ """.join(__UpperCamelCase)
_lowerCamelCase : Union[str, Any] = self.encode(__UpperCamelCase)
if len(__UpperCamelCase) > self.model_max_length:
_lowerCamelCase : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""")
return input_ids
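
# Hedged sketch (my addition): the conversation flattening performed by
# _build_conversation_input_ids above, with plain (is_user, text) tuples standing
# in for a transformers Conversation object and a fake length-based encoder; all
# names and the max_len value are illustrative assumptions.
def _flatten_conversation_sketch(turns, max_len=8):
    texts = [" " + text if is_user else text for is_user, text in turns]
    ids = list(range(len(" ".join(texts))))  # stand-in for self.encode(...)
    return ids[-max_len:] if len(ids) > max_len else ids


_example_turns = [(True, "hi"), (False, "hello there")]
assert len(_flatten_conversation_sketch(_example_turns)) <= 8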
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
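
# Hedged sketch (my addition): the device-aware seeding convention used by
# get_dummy_inputs above. torch.Generator has historically not supported the
# "mps" device, hence the fallback to the global torch.manual_seed.
def _make_generator_sketch(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)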
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
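
# Hedged usage sketch (my addition, assumes a CUDA device is available): bracket
# a callable with the reset/measure pattern used by the tests above and return
# the peak bytes allocated while it ran.
def _peak_gpu_bytes_sketch(fn):
    _start_torch_memory_measurement()
    fn()
    return torch.cuda.max_memory_allocated()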
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int], ):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
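
# Hedged sketch (my addition): the partition-ordered, "{partition}_{row}" keyed
# iteration above, with plain Python lists standing in for Spark partitions so
# no pyspark is needed to see the behaviour.
def _iter_partitions_sketch(partitions, order):
    for pid in order:
        for row_id, row in enumerate(partitions[pid]):
            yield f"{pid}_{row_id}", row


_parts = [["a", "b"], ["c"]]
assert dict(_iter_partitions_sketch(_parts, [1, 0])) == {"1_0": "c", "0_0": "a", "0_1": "b"}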
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None, ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs, ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which would
        # cause a pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir")
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size, ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "arrow" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
self._validate_cache_dir()
snake_case__ : int =convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
snake_case__ : Optional[int] =not is_remote_filesystem(self._fs )
snake_case__ : Tuple =os.path.join if is_local else posixpath.join
snake_case__ : Any ='''-TTTTT-SSSSS-of-NNNNN'''
snake_case__ : Tuple =f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
snake_case__ : str =path_join(self._output_dir , lowerCamelCase_ )
snake_case__ : Union[str, Any] =0
snake_case__ : List[Any] =0
snake_case__ : int =0
snake_case__ : List[str] =[]
snake_case__ : Optional[int] =[]
for task_id, content in self._prepare_split_single(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
(
(
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
),
) : Union[str, Any] =content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase_ )
snake_case__ : str =total_num_examples
snake_case__ : Optional[Any] =total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
snake_case__ : Optional[Any] =all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
snake_case__ : Optional[Any] =self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
rename(
lowerCamelCase_ , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , )
snake_case__ : str =[]
snake_case__ : int =0
for i in range(len(lowerCamelCase_ ) ):
snake_case__, snake_case__ : Any =task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase_ , len(lowerCamelCase_ ) ).map(lambda __SCREAMING_SNAKE_CASE : _rename_shard(*lowerCamelCase_ ) ).collect()
else:
# don't use any pattern
snake_case__ : int =0
snake_case__ : Any =task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(lowerCamelCase_ , '''''' ) , )
    def _get_examples_iterable_for_split(self, split_generator) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
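
# Hedged sketch (my addition): the two-stage shard naming used above. Workers
# first fill in the TTTTT (task) and SSSSS (shard) fields; a final rename then
# collapses TTTTT-SSSSS into a global shard index out of NNNNN total shards.
def _intermediate_name_sketch(fpath, task_id, shard_id):
    return fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")


def _final_name_sketch(fpath, global_shard_id, total_shards):
    return fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}")


_fpath = "dataset-train-TTTTT-SSSSS-of-NNNNN.arrow"
assert _intermediate_name_sketch(_fpath, 3, 0) == "dataset-train-00003-00000-of-NNNNN.arrow"
assert _final_name_sketch(_fpath, 7, 12) == "dataset-train-00007-of-00012.arrow"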
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
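
# Hedged usage sketch (my addition): the register -> save -> auto-reload round
# trip exercised by test_new_feature_extractor_registration above. It touches
# the private _extra_content mappings only to undo the registration, mirroring
# the test's finally-block; tmp_dir is assumed to be a writable directory.
def _registration_round_trip_sketch(tmp_dir):
    try:
        AutoConfig.register("custom", CustomConfig)
        AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
        extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        extractor.save_pretrained(tmp_dir)
        return AutoFeatureExtractor.from_pretrained(tmp_dir)  # resolves to CustomFeatureExtractor
    finally:
        CONFIG_MAPPING._extra_content.pop("custom", None)
        FEATURE_EXTRACTOR_MAPPING._extra_content.pop(CustomConfig, None)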
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
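
# ----------------------------------------------------------------------------
# A standalone usage sketch, not part of the test suite above. It shows the
# higher-level processor path instead of hand-built `input_ids`/`bbox`; the
# checkpoint id comes from the integration test, everything else (the words
# and boxes) is a toy example. Boxes use 0-1000 normalized coordinates.
#
#     from transformers import LayoutLMv3Model, LayoutLMv3Processor
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#     model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
#     encoding = processor(image, ["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]], return_tensors="pt")
#     outputs = model(**encoding)
# ----------------------------------------------------------------------------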
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
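
# A hedged usage sketch of the lazy-module pattern above: importing the package
# stays cheap, and `tokenization_herbert` is only loaded on first attribute
# access. The checkpoint id is illustrative, not pinned by this file.
#
#     from transformers import HerbertTokenizer
#
#     tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-base-cased")
#     print(tokenizer.tokenize("Klasyczny przykład"))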
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]

    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
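
# A hedged usage sketch: with `--debug` the launcher only prints the composed
# `gcloud` command instead of executing it, so the following is safe to try
# locally. The TPU name and zone are placeholders, not real resources.
#
#     parser = tpu_command_parser()
#     args = parser.parse_args(
#         ["--tpu_name", "demo-tpu", "--tpu_zone", "us-central2-b", "--command", "echo hello", "--debug"]
#     )
#     tpu_command_launcher(args)
#     # -> Running gcloud compute tpus tpu-vm ssh demo-tpu --zone us-central2-b
#     #    --command cd /usr/share; echo hello --worker all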
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
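
# A hedged illustration of the expected `ref_file` format (an assumption based
# on `DataCollatorForWholeWordMask`, which consumes the `chinese_ref` column):
# one JSON-encoded list per line, parallel to the dataset rows, listing the
# positions of sub-word tokens that continue a Chinese word. Real ref files are
# produced by the companion `run_chinese_ref.py` script.
#
#     ds = Dataset.from_dict({"input_ids": [[101, 102], [101, 103]]})
#     # contents of refs.json:  "[1]\n[1]\n"
#     ds = add_chinese_references(ds, "refs.json")
#     print(ds["chinese_ref"])  # [[1], [1]]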
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]
    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 149 | 1 |
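
# Example invocation (illustrative; the flags mirror the dataclasses above):
#
#   python run_mlm_wwm.py \
#     --model_name_or_path bert-base-chinese \
#     --train_file path/to/train.txt \
#     --train_ref_file path/to/train_ref.json \
#     --output_dir ./output \
#     --do_train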
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
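
        # A reading of the assertion above: with the toy vocab written in `setUp`
        # ({"adapt": 0, "re@@": 1, "a@@": 2, "apt": 3, "c@@": 4, "t": 5, "<unk>": 6}),
        # "adapt react readapt apt" tokenizes to "adapt re@@ a@@ c@@ t re@@ adapt apt"
        # because the merges only build up "ap" -> "apt" -> "adapt", so "react" never
        # merges past single characters. The ids are therefore [0, 1, 2, 4, 5, 1, 0, 3],
        # and the appended <unk> maps to 6.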
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        # Sum the currently allocated resources, per resource type.
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        # Resources still free = total claim vector - currently allocated.
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        # Remaining need of each process = maximum claim - current allocation.
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
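
    # A quick demonstration run (sketch): any truthy keyword argument makes
    # `main` print the resource tables before executing the safety check.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)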
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
# NOTE: the model-specific class name was lost in the upstream obfuscation of
# this file; `SemanticSegmentationImageProcessor` is a descriptive placeholder,
# not a real transformers class name.
class SemanticSegmentationImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
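
# A hedged usage sketch (`SemanticSegmentationImageProcessor` is the placeholder
# name chosen above; see the note on the class):
#
#     import numpy as np
#
#     processor = SemanticSegmentationImageProcessor()
#     dummy = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
#     batch = processor(images=dummy, return_tensors="pt")
#     print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
#     # With a segmentation model's outputs:
#     # maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])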
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always
        # return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
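
# A hedged end-to-end sketch (not part of the tests): the prior pipeline turns a
# prompt into CLIP image embeddings, which the Kandinsky 2.2 decoder pipeline
# then renders. The checkpoint ids below are the public Kandinsky 2.2 repos,
# used here purely as an illustration.
#
#     from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
#
#     prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#     decoder = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
#     image_embeds, negative_image_embeds = prior("a horse", guidance_scale=4.0).to_tuple()
#     image = decoder(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds).images[0]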
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def A ( __UpperCamelCase , __UpperCamelCase="" , __UpperCamelCase=None , __UpperCamelCase=None ) -> List[str]:
A__ = 0
A__ = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__UpperCamelCase ):
index += 1
A__ = ['\n'.join(lines[:index] )]
else:
A__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
A__ = [lines[index]]
index += 1
while index < len(__UpperCamelCase ) and (end_prompt is None or not lines[index].startswith(__UpperCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__UpperCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__UpperCamelCase ) )
if index < len(__UpperCamelCase ) - 1:
A__ = [lines[index + 1]]
index += 1
else:
A__ = []
else:
blocks.append('\n'.join(__UpperCamelCase ) )
A__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__UpperCamelCase ) > 0:
blocks.append('\n'.join(__UpperCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__UpperCamelCase ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
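
# Usage sketch (the script path is illustrative; in the diffusers repo this
# checker lives under `utils/`):
#
#   python utils/custom_init_isort.py --check_only   # fail if any __init__ needs sorting
#   python utils/custom_init_isort.py                # rewrite the inits in place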
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ):
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : str = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : Union[str, Any] = False
A__ : Any = False
A__ : Union[str, Any] = False
def _a ( self : Dict ):
"""simple docstring"""
A__ = ViTHybridModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self : int ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def _a ( self : List[str] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
A__ = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
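                    # (mean * 1e9).round() / 1e9 rounds the parameter mean to nine decimal
                    # places, so under a zero-init config every remaining trainable weight
                    # should collapse to exactly 0.0 (zeroed) or 1.0 (e.g. norm scales)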
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _a ( self : int ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTHybridModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> Union[str, Any]:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_snake_case )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
A__ = model(**_snake_case )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = model(**_snake_case )
A__ = outputs.logits
# model predicts one of the 1000 ImageNet classes
A__ = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
| 52 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A_ ( A__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = """ssube/stable-diffusion-x4-upscaler-onnx"""
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :Any=0 ):
"""simple docstring"""
lowerCamelCase__ : int =floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase_ ) )
lowerCamelCase__ : Optional[Any] =torch.manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Any =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : int =self.get_dummy_inputs()
lowerCamelCase__ : Any =pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : List[str] =image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
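        # (the x4 upscaler scales each spatial side by 4: 128 * 4 = 512)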
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ : Any =np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Dict =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCamelCase__ : Union[str, Any] =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[str] =self.get_dummy_inputs()
lowerCamelCase__ : Optional[Any] =pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ : Tuple =np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ : int =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCamelCase__ : Optional[int] =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =self.get_dummy_inputs()
lowerCamelCase__ : List[Any] =pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Union[str, Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ : Dict =np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ : str =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCamelCase__ : List[str] =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Tuple =self.get_dummy_inputs()
lowerCamelCase__ : List[Any] =pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Union[str, Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ : Tuple =np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : str =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCamelCase__ : str =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : int =self.get_dummy_inputs()
lowerCamelCase__ : Optional[Any] =pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ : Dict =np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =ort.SessionOptions()
lowerCamelCase__ : Any =False
return options
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : List[str] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowerCamelCase__ : List[str] =init_image.resize((128, 128) )
# using the PNDM scheduler by default
lowerCamelCase__ : List[str] =OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Tuple ='A fantasy landscape, trending on artstation'
lowerCamelCase__ : Optional[int] =torch.manual_seed(0 )
lowerCamelCase__ : int =pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase_ , output_type='np' , )
lowerCamelCase__ : Optional[Any] =output.images
lowerCamelCase__ : Dict =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase__ : int =np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Any =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowerCamelCase__ : Optional[int] =init_image.resize((128, 128) )
lowerCamelCase__ : Tuple =LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
lowerCamelCase__ : Optional[Any] =OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[str] ='A fantasy landscape, trending on artstation'
lowerCamelCase__ : List[str] =torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase_ , output_type='np' , )
lowerCamelCase__ : List[str] =output.images
lowerCamelCase__ : List[Any] =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase__ : List[str] =np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 174 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def lowerCAmelCase_ ( snake_case_ : str ) ->str:
return "".join(sorted(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_ : str ) ->list[str]:
return word_by_signature[signature(snake_case_ )]
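# illustrative example (not part of the original script): signature("listen") ==
# signature("silent") == "eilnst", so both words land in the same bucket and the
# lookup helper above returns every word in the list that shares that signature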
lowerCAmelCase = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
lowerCAmelCase = sorted({word.strip().lower() for word in data.splitlines()})
lowerCAmelCase = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
lowerCAmelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams)) | 174 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__UpperCAmelCase = logging.get_logger(__name__)
class lowercase__( __UpperCAmelCase ):
'''simple docstring'''
snake_case__ = "mask2former"
snake_case__ = ["swin"]
snake_case__ = {"hidden_size": "hidden_dim"}
def __init__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 2_56 , __SCREAMING_SNAKE_CASE = 2_56 , __SCREAMING_SNAKE_CASE = 2_56 , __SCREAMING_SNAKE_CASE = 10_24 , __SCREAMING_SNAKE_CASE = "relu" , __SCREAMING_SNAKE_CASE = 6 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 8 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 20_48 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 4 , __SCREAMING_SNAKE_CASE = 2_55 , __SCREAMING_SNAKE_CASE = 1_00 , __SCREAMING_SNAKE_CASE = 0.1 , __SCREAMING_SNAKE_CASE = 2.0 , __SCREAMING_SNAKE_CASE = 5.0 , __SCREAMING_SNAKE_CASE = 5.0 , __SCREAMING_SNAKE_CASE = 1_25_44 , __SCREAMING_SNAKE_CASE = 3.0 , __SCREAMING_SNAKE_CASE = 0.75 , __SCREAMING_SNAKE_CASE = 0.02 , __SCREAMING_SNAKE_CASE = 1.0 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = [4, 8, 16, 32] , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
UpperCamelCase__ : Union[str, Any] =CONFIG_MAPPING["swin"](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_lowerCamelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(_lowerCamelCase , _lowerCamelCase):
UpperCamelCase__ : Optional[int] =backbone_config.pop("model_type")
UpperCamelCase__ : int =CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ : Optional[Any] =config_class.from_dict(_lowerCamelCase)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported)}''')
UpperCamelCase__ : Any =backbone_config
UpperCamelCase__ : int =feature_size
UpperCamelCase__ : int =mask_feature_size
UpperCamelCase__ : Union[str, Any] =hidden_dim
UpperCamelCase__ : Tuple =encoder_feedforward_dim
UpperCamelCase__ : str =activation_function
UpperCamelCase__ : Tuple =encoder_layers
UpperCamelCase__ : Dict =decoder_layers
UpperCamelCase__ : Dict =num_attention_heads
UpperCamelCase__ : List[str] =dropout
UpperCamelCase__ : Optional[Any] =dim_feedforward
UpperCamelCase__ : Union[str, Any] =pre_norm
UpperCamelCase__ : str =enforce_input_projection
UpperCamelCase__ : List[Any] =common_stride
UpperCamelCase__ : List[Any] =ignore_value
UpperCamelCase__ : str =num_queries
UpperCamelCase__ : List[Any] =no_object_weight
UpperCamelCase__ : Optional[int] =class_weight
UpperCamelCase__ : Optional[int] =mask_weight
UpperCamelCase__ : str =dice_weight
UpperCamelCase__ : List[Any] =train_num_points
UpperCamelCase__ : List[str] =oversample_ratio
UpperCamelCase__ : Dict =importance_sample_ratio
UpperCamelCase__ : Tuple =init_std
UpperCamelCase__ : List[Any] =init_xavier_std
UpperCamelCase__ : Optional[Any] =use_auxiliary_loss
UpperCamelCase__ : Any =feature_strides
UpperCamelCase__ : Union[str, Any] =output_auxiliary_logits
UpperCamelCase__ : Tuple =decoder_layers
super().__init__(**_lowerCamelCase)
@classmethod
def UpperCAmelCase ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
return cls(
backbone_config=_lowerCamelCase , **_lowerCamelCase , )
def UpperCAmelCase ( self) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] =copy.deepcopy(self.__dict__)
UpperCamelCase__ : Dict =self.backbone_config.to_dict()
UpperCamelCase__ : Dict =self.__class__.model_type
return output
| 701 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class lowercase__( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1.0 , __SCREAMING_SNAKE_CASE = None , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
UpperCamelCase__ : Tuple =initial_learning_rate
UpperCamelCase__ : List[str] =warmup_steps
UpperCamelCase__ : List[Any] =power
UpperCamelCase__ : Optional[Any] =decay_schedule_fn
UpperCamelCase__ : List[str] =name
def __call__( self , __SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
with tf.name_scope(self.name or "WarmUp") as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
UpperCamelCase__ : Optional[Any] =tf.cast(__SCREAMING_SNAKE_CASE , tf.floataa)
UpperCamelCase__ : Tuple =tf.cast(self.warmup_steps , tf.floataa)
UpperCamelCase__ : Optional[int] =global_step_float / warmup_steps_float
UpperCamelCase__ : List[Any] =self.initial_learning_rate * tf.math.pow(__SCREAMING_SNAKE_CASE , self.power)
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps) , name=__SCREAMING_SNAKE_CASE , )
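    # worked example (illustrative): with warmup_steps=1000 and power=1.0, step 100
    # yields lr = initial_learning_rate * (100 / 1000) ** 1.0 = 0.1 * initial_learning_rate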
def UpperCAmelCase ( self) -> Optional[Any]:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _lowerCamelCase ( A_ : float , A_ : int , A_ : int , A_ : float = 0.0 , A_ : float = 0.9 , A_ : float = 0.999 , A_ : float = 1E-8 , A_ : Optional[float] = None , A_ : Optional[float] = None , A_ : float = 0.0 , A_ : float = 1.0 , A_ : Optional[List[str]] = None , ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Dict =tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=A_ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=A_ , )
if num_warmup_steps:
UpperCamelCase__ : Dict =WarmUp(
initial_learning_rate=A_ , decay_schedule_fn=A_ , warmup_steps=A_ , )
if weight_decay_rate > 0.0:
UpperCamelCase__ : Union[str, Any] =AdamWeightDecay(
        learning_rate=A_ , weight_decay_rate=A_ , beta_1=A_ , beta_2=A_ , epsilon=A_ , clipnorm=A_ , global_clipnorm=A_ , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=A_ , )
else:
UpperCamelCase__ : List[Any] =tf.keras.optimizers.Adam(
        learning_rate=A_ , beta_1=A_ , beta_2=A_ , epsilon=A_ , clipnorm=A_ , global_clipnorm=A_ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
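# illustrative usage of the factory above (assumed ordering from the body: init_lr,
# num_train_steps, num_warmup_steps come first; the function name is obfuscated here):
# optimizer, lr_schedule = _lowerCamelCase(5e-5, 10_000, 1_000)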
class lowercase__( snake_case__ ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE = 0.0_01 , __SCREAMING_SNAKE_CASE = 0.9 , __SCREAMING_SNAKE_CASE = 0.9_99 , __SCREAMING_SNAKE_CASE = 1E-7 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "AdamWeightDecay" , **__SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Optional[Any] =weight_decay_rate
UpperCamelCase__ : Dict =include_in_weight_decay
UpperCamelCase__ : int =exclude_from_weight_decay
@classmethod
def UpperCAmelCase ( cls , __SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] ={"WarmUp": WarmUp}
return super(__SCREAMING_SNAKE_CASE , cls).from_config(__SCREAMING_SNAKE_CASE , custom_objects=__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self)._prepare_local(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any =tf.constant(
self.weight_decay_rate , name="adam_weight_decay_rate")
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
UpperCamelCase__ : List[str] =self._do_use_weight_decay(var.name)
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ : List[str] =list(zip(*__SCREAMING_SNAKE_CASE))
return super(__SCREAMING_SNAKE_CASE , self).apply_gradients(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) , name=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
UpperCamelCase__ : Optional[int] =apply_state or {}
UpperCamelCase__ : Optional[Any] =apply_state.get((var_device, var_dtype))
if coefficients is None:
UpperCamelCase__ : Any =self._fallback_apply_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : int =coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ : List[Any] =self._get_lr(var.device , var.dtype.base_dtype , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : str =self._decay_weights_op(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
with tf.control_dependencies([decay]):
return super(__SCREAMING_SNAKE_CASE , self)._resource_apply_dense(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None) -> Dict:
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ : Any =self._get_lr(var.device , var.dtype.base_dtype , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : List[Any] =self._decay_weights_op(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
with tf.control_dependencies([decay]):
return super(__SCREAMING_SNAKE_CASE , self)._resource_apply_sparse(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Any =super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate})
return config
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) is not None:
return False
return True
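    # e.g. with exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] (as set by
    # the factory above), a variable named "encoder/LayerNorm/gamma" is skipped, while
    # "encoder/attention/kernel" still receives weight decay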
class lowercase__( snake_case__ ):
'''simple docstring'''
def __init__( self) -> int:
"""simple docstring"""
UpperCamelCase__ : str =[]
UpperCamelCase__ : List[str] =None
@property
def UpperCAmelCase ( self) -> List[str]:
"""simple docstring"""
if self._accum_steps is None:
UpperCamelCase__ : Any =tf.Variable(
tf.constant(0 , dtype=tf.intaa) , trainable=__SCREAMING_SNAKE_CASE , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients")
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , __SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
if not self._gradients:
UpperCamelCase__ : Any =self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__SCREAMING_SNAKE_CASE) , trainable=__SCREAMING_SNAKE_CASE , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
])
if len(__SCREAMING_SNAKE_CASE) != len(self._gradients):
raise ValueError(F'''Expected {len(self._gradients)} gradients, but got {len(__SCREAMING_SNAKE_CASE)}''')
for accum_gradient, gradient in zip(self._gradients , __SCREAMING_SNAKE_CASE):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__SCREAMING_SNAKE_CASE)
self._accum_steps.assign_add(1)
def UpperCAmelCase ( self) -> Tuple:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0)
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__SCREAMING_SNAKE_CASE))
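# conceptual flow for the accumulator class above (method names are obfuscated here,
# so this is a sketch rather than runnable calls):
# 1. call the accumulator with each micro-batch's gradients to sum them in place
# 2. read the summed gradients back and hand them to optimizer.apply_gradients
# 3. zero the buffers and the step counter before the next accumulation window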
| 582 | 0 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
A_ : int =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase__ )
class __a ( lowerCAmelCase__ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
self.check_model_type(a__ )
def snake_case_ ( self , a__=None , a__=None , a__=None , **a__ ):
_lowerCamelCase , _lowerCamelCase = {}, {}
if padding is not None:
_lowerCamelCase = padding
if truncation is not None:
_lowerCamelCase = truncation
if top_k is not None:
_lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , a__ , a__ = None , **a__ ):
if isinstance(a__ , (Image.Image, str) ) and isinstance(a__ , a__ ):
_lowerCamelCase = {'image': image, 'question': question}
else:
_lowerCamelCase = image
_lowerCamelCase = super().__call__(a__ , **a__ )
return results
def snake_case_ ( self , a__ , a__=False , a__=False ):
_lowerCamelCase = load_image(inputs['image'] )
_lowerCamelCase = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=a__ , truncation=a__ )
_lowerCamelCase = self.image_processor(images=a__ , return_tensors=self.framework )
model_inputs.update(a__ )
return model_inputs
def snake_case_ ( self , a__ ):
_lowerCamelCase = self.model(**a__ )
return model_outputs
def snake_case_ ( self , a__ , a__=5 ):
if top_k > self.model.config.num_labels:
_lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
_lowerCamelCase = model_outputs.logits.sigmoid()[0]
_lowerCamelCase , _lowerCamelCase = probs.topk(a__ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
_lowerCamelCase = scores.tolist()
_lowerCamelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(a__ , a__ )]
| 650 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
A_ : Dict =[
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
A_ : Dict ="""UperNetConfig"""
class __a ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ = 0 , a__ = False , a__ = 1 , ):
super().__init__()
_lowerCamelCase = nn.Convad(
in_channels=a__ , out_channels=a__ , kernel_size=a__ , padding=a__ , bias=a__ , dilation=a__ , )
_lowerCamelCase = nn.BatchNormad(a__ )
_lowerCamelCase = nn.ReLU()
def snake_case_ ( self , a__ ):
_lowerCamelCase = self.conv(a__ )
_lowerCamelCase = self.batch_norm(a__ )
_lowerCamelCase = self.activation(a__ )
return output
class __a ( nn.Module ):
def __init__( self , a__ , a__ , a__ ):
super().__init__()
_lowerCamelCase = [
nn.AdaptiveAvgPoolad(a__ ),
UperNetConvModule(a__ , a__ , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(a__ ) , a__ )
def snake_case_ ( self , a__ ):
_lowerCamelCase = input
for layer in self.layers:
_lowerCamelCase = layer(a__ )
return hidden_state
class __a ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ ):
super().__init__()
_lowerCamelCase = pool_scales
_lowerCamelCase = align_corners
_lowerCamelCase = in_channels
_lowerCamelCase = channels
_lowerCamelCase = []
for i, pool_scale in enumerate(a__ ):
_lowerCamelCase = UperNetPyramidPoolingBlock(pool_scale=a__ , in_channels=a__ , channels=a__ )
self.blocks.append(a__ )
self.add_module(str(a__ ) , a__ )
def snake_case_ ( self , a__ ):
_lowerCamelCase = []
for ppm in self.blocks:
_lowerCamelCase = ppm(a__ )
_lowerCamelCase = nn.functional.interpolate(
a__ , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(a__ )
return ppm_outs
class __a ( nn.Module ):
def __init__( self , a__ , a__ ):
super().__init__()
_lowerCamelCase = config
_lowerCamelCase = config.pool_scales # e.g. (1, 2, 3, 6)
_lowerCamelCase = in_channels
_lowerCamelCase = config.hidden_size
_lowerCamelCase = False
_lowerCamelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
_lowerCamelCase = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
_lowerCamelCase = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
_lowerCamelCase = nn.ModuleList()
_lowerCamelCase = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
_lowerCamelCase = UperNetConvModule(a__ , self.channels , kernel_size=1 )
_lowerCamelCase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(a__ )
self.fpn_convs.append(a__ )
_lowerCamelCase = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def snake_case_ ( self ):
self.apply(self._init_weights )
def snake_case_ ( self , a__ ):
if isinstance(a__ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case_ ( self , a__ ):
_lowerCamelCase = inputs[-1]
_lowerCamelCase = [x]
psp_outs.extend(self.psp_modules(a__ ) )
_lowerCamelCase = torch.cat(a__ , dim=1 )
_lowerCamelCase = self.bottleneck(a__ )
return output
def snake_case_ ( self , a__ ):
# build laterals
_lowerCamelCase = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(a__ ) )
# build top-down path
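        # each coarser FPN level is upsampled to the next finer lateral's resolution and
        # fused by element-wise addition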
_lowerCamelCase = len(a__ )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
_lowerCamelCase = laterals[i - 1].shape[2:]
_lowerCamelCase = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=a__ , mode='bilinear' , align_corners=self.align_corners )
# build outputs
_lowerCamelCase = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
_lowerCamelCase = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
_lowerCamelCase = torch.cat(a__ , dim=1 )
_lowerCamelCase = self.fpn_bottleneck(a__ )
_lowerCamelCase = self.classifier(a__ )
return output
class __a ( nn.Module ):
def __init__( self , a__ , a__ = 2 , a__ = 3 , a__ = 1 ):
super().__init__()
_lowerCamelCase = config
_lowerCamelCase = config.auxiliary_in_channels
_lowerCamelCase = config.auxiliary_channels
_lowerCamelCase = config.auxiliary_num_convs
_lowerCamelCase = config.auxiliary_concat_input
_lowerCamelCase = in_index
_lowerCamelCase = (kernel_size // 2) * dilation
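        # "same"-style padding (illustrative): kernel_size=3 and dilation=1 give
        # padding=1, which preserves spatial resolution for stride-1 convolutions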
_lowerCamelCase = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=a__ , padding=a__ , dilation=a__ ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=a__ , padding=a__ , dilation=a__ ) )
if self.num_convs == 0:
_lowerCamelCase = nn.Identity()
else:
_lowerCamelCase = nn.Sequential(*a__ )
if self.concat_input:
_lowerCamelCase = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=a__ , padding=kernel_size // 2 )
_lowerCamelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def snake_case_ ( self ):
self.apply(self._init_weights )
def snake_case_ ( self , a__ ):
if isinstance(a__ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case_ ( self , a__ ):
# just take the relevant feature maps
_lowerCamelCase = encoder_hidden_states[self.in_index]
_lowerCamelCase = self.convs(a__ )
if self.concat_input:
_lowerCamelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
_lowerCamelCase = self.classifier(a__ )
return output
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple = UperNetConfig
SCREAMING_SNAKE_CASE__ : Optional[Any] = "pixel_values"
SCREAMING_SNAKE_CASE__ : str = True
def snake_case_ ( self , a__ ):
if isinstance(a__ , a__ ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def snake_case_ ( self ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def snake_case_ ( self , a__ , a__=False ):
if isinstance(a__ , a__ ):
_lowerCamelCase = value
A_ : Union[str, Any] =R"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
A_ : int =R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , lowerCAmelCase__ , )
class __a ( lowerCAmelCase__ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCamelCase = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
_lowerCamelCase = UperNetHead(a__ , in_channels=self.backbone.channels )
_lowerCamelCase = UperNetFCNHead(a__ ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=a__ , config_class=_CONFIG_FOR_DOC )
def snake_case_ ( self , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , ):
_lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase = output_attentions if output_attentions is not None else self.config.output_attentions
_lowerCamelCase = self.backbone.forward_with_filtered_kwargs(
a__ , output_hidden_states=a__ , output_attentions=a__ )
_lowerCamelCase = outputs.feature_maps
_lowerCamelCase = self.decode_head(a__ )
_lowerCamelCase = nn.functional.interpolate(a__ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=a__ )
_lowerCamelCase = None
if self.auxiliary_head is not None:
_lowerCamelCase = self.auxiliary_head(a__ )
_lowerCamelCase = nn.functional.interpolate(
a__ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=a__ )
_lowerCamelCase = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
_lowerCamelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
_lowerCamelCase = loss_fct(a__ , a__ )
_lowerCamelCase = loss_fct(a__ , a__ )
_lowerCamelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
_lowerCamelCase = (logits,) + outputs[1:]
else:
_lowerCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=a__ , logits=a__ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 650 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='''laion/clap-htsat-unfused'''
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return RobertaTokenizer.from_pretrained(self.checkpoint , **_A )
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **_A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_feature_extractor()
_SCREAMING_SNAKE_CASE =ClapProcessor(tokenizer=_A , feature_extractor=_A )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_feature_extractor(do_normalize=_A , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_feature_extractor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ClapProcessor(tokenizer=_A , feature_extractor=_A )
_SCREAMING_SNAKE_CASE =floats_list((3, 1_0_0_0) )
_SCREAMING_SNAKE_CASE =feature_extractor(_A , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(audios=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_feature_extractor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ClapProcessor(tokenizer=_A , feature_extractor=_A )
_SCREAMING_SNAKE_CASE ='''This is a test string'''
_SCREAMING_SNAKE_CASE =processor(text=_A )
_SCREAMING_SNAKE_CASE =tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_feature_extractor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ClapProcessor(tokenizer=_A , feature_extractor=_A )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_A )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_feature_extractor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ClapProcessor(tokenizer=_A , feature_extractor=_A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
| 165 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Any = "owlvit_text_model"
def __init__( self , _A=4_9_4_0_8 , _A=5_1_2 , _A=2_0_4_8 , _A=1_2 , _A=8 , _A=1_6 , _A="quick_gelu" , _A=1E-5 , _A=0.0 , _A=0.02 , _A=1.0 , _A=0 , _A=4_9_4_0_6 , _A=4_9_4_0_7 , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =initializer_factor
@classmethod
def UpperCamelCase_ ( cls , _A , **_A ):
'''simple docstring'''
cls._set_token_in_kwargs(_A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cls.get_config_dict(_A , **_A )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
_SCREAMING_SNAKE_CASE =config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Tuple = "owlvit_vision_model"
def __init__( self , _A=7_6_8 , _A=3_0_7_2 , _A=1_2 , _A=1_2 , _A=3 , _A=7_6_8 , _A=3_2 , _A="quick_gelu" , _A=1E-5 , _A=0.0 , _A=0.02 , _A=1.0 , **_A , ):
'''simple docstring'''
super().__init__(**_A )
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =initializer_factor
@classmethod
def UpperCamelCase_ ( cls , _A , **_A ):
'''simple docstring'''
cls._set_token_in_kwargs(_A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
_SCREAMING_SNAKE_CASE =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Optional[int] = "owlvit"
lowercase : List[Any] = True
def __init__( self , _A=None , _A=None , _A=5_1_2 , _A=2.6592 , _A=True , **_A , ):
'''simple docstring'''
super().__init__(**_A )
if text_config is None:
_SCREAMING_SNAKE_CASE ={}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
_SCREAMING_SNAKE_CASE ={}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
_SCREAMING_SNAKE_CASE =OwlViTTextConfig(**_A )
_SCREAMING_SNAKE_CASE =OwlViTVisionConfig(**_A )
_SCREAMING_SNAKE_CASE =projection_dim
_SCREAMING_SNAKE_CASE =logit_scale_init_value
_SCREAMING_SNAKE_CASE =return_dict
_SCREAMING_SNAKE_CASE =1.0
@classmethod
def UpperCamelCase_ ( cls , _A , **_A ):
'''simple docstring'''
cls._set_token_in_kwargs(_A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cls.get_config_dict(_A , **_A )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
@classmethod
def UpperCamelCase_ ( cls , _A , _A , **_A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =text_config
_SCREAMING_SNAKE_CASE =vision_config
return cls.from_dict(_A , **_A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.text_config.to_dict()
_SCREAMING_SNAKE_CASE =self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 1E-4
def UpperCamelCase_ ( self , _A , _A = -1 , _A = -1 , _A = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =super().generate_dummy_inputs(
processor.tokenizer , batch_size=_A , seq_length=_A , framework=_A )
_SCREAMING_SNAKE_CASE =super().generate_dummy_inputs(
processor.image_processor , batch_size=_A , framework=_A )
return {**text_input_dict, **image_input_dict}
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 1_4
| 165 | 1 |
import qiskit
def A__ ( lowerCamelCase , lowerCamelCase ) -> qiskit.result.counts.Counts:
UpperCamelCase_: int = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
UpperCamelCase_: Union[str, Any] = qiskit.QuantumCircuit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
UpperCamelCase_: List[Any] = qiskit.execute(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_SCREAMING_SNAKE_CASE )
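# with both qubits flipped by the X gates, the ideal result is every shot in state
# '11', e.g. {'11': 1000}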
if __name__ == "__main__":
lowerCamelCase_ : Dict = single_qubit_measure(2, 2)
print(F"""Total count for various states are: {counts}""")
| 548 |
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
lowercase__ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
lowercase__ = set()
return any(
node not in visited and depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for node in graph )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
visited.add(_SCREAMING_SNAKE_CASE )
rec_stk.add(_SCREAMING_SNAKE_CASE )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(_SCREAMING_SNAKE_CASE )
return False
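# illustrative check (conceptual; the functions above share an obfuscated name):
# a graph with edges 0 -> 1 -> 2 -> 0 contains a back edge, so the cycle checker
# returns True; dropping the 2 -> 0 edge makes it acyclic and it returns False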
if __name__ == "__main__":
from doctest import testmod
testmod()
| 235 | 0 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase__ = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
UpperCamelCase__ = (
subprocess.check_output(f'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()
)
UpperCamelCase__ = '|'.join(sys.argv[1:])
UpperCamelCase__ = re.compile(rf'^({joined_dirs}).*?\.py$')
UpperCamelCase__ = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 254 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( lowercase , lowercase , lowercase ):
UpperCamelCase : Any = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = 50_257 , UpperCamelCase_ = 1_024 , UpperCamelCase_ = 768 , UpperCamelCase_ = 12 , UpperCamelCase_ = 12 , UpperCamelCase_ = None , UpperCamelCase_ = "gelu_new" , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 1E-5 , UpperCamelCase_ = 0.02 , UpperCamelCase_ = True , UpperCamelCase_ = True , UpperCamelCase_ = False , UpperCamelCase_ = False , ):
super().__init__()
UpperCAmelCase__ : List[Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
F''' `n_embd`: {n_embd} are not equal.''' )
UpperCAmelCase__ : Dict = prefix_inner_dim
UpperCAmelCase__ : List[Any] = prefix_hidden_dim
UpperCAmelCase__ : List[Any] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
UpperCAmelCase__ : Any = (
nn.Linear(self.prefix_hidden_dim , UpperCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
UpperCAmelCase__ : Union[str, Any] = GPTaConfig(
vocab_size=UpperCamelCase_ , n_positions=UpperCamelCase_ , n_embd=UpperCamelCase_ , n_layer=UpperCamelCase_ , n_head=UpperCamelCase_ , n_inner=UpperCamelCase_ , activation_function=UpperCamelCase_ , resid_pdrop=UpperCamelCase_ , embd_pdrop=UpperCamelCase_ , attn_pdrop=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ , initializer_range=UpperCamelCase_ , scale_attn_weights=UpperCamelCase_ , use_cache=UpperCamelCase_ , scale_attn_by_inverse_layer_idx=UpperCamelCase_ , reorder_and_upcast_attn=UpperCamelCase_ , )
        UpperCAmelCase__ : List[str] = GPT2LMHeadModel(UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , ):
UpperCAmelCase__ : Optional[Any] = self.transformer.transformer.wte(UpperCamelCase_ )
UpperCAmelCase__ : str = self.encode_prefix(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = self.decode_prefix(UpperCamelCase_ )
UpperCAmelCase__ : List[str] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
UpperCAmelCase__ : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
UpperCAmelCase__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
UpperCAmelCase__ : List[str] = self.transformer(inputs_embeds=UpperCamelCase_ , labels=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
        return torch.zeros(UpperCamelCase_ , self.prefix_length , dtype=torch.int64 , device=UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ ):
return self.encode_prefix(UpperCamelCase_ )
@torch.no_grad()
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : List[str] = torch.split(UpperCamelCase_ , 1 , dim=0 )
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Optional[Any] = []
for feature in features:
UpperCAmelCase__ : List[str] = self.decode_prefix(feature.to(UpperCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
UpperCAmelCase__ , UpperCAmelCase__ : int = self.generate_beam(
input_embeds=UpperCamelCase_ , device=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
UpperCAmelCase__ : Optional[int] = torch.stack(UpperCamelCase_ )
UpperCAmelCase__ : Any = torch.stack(UpperCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __snake_case ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = 5 , UpperCamelCase_ = 67 , UpperCamelCase_ = 1.0 , UpperCamelCase_ = None , ):
UpperCAmelCase__ : Optional[int] = eos_token_id
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : List[str] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.int )
UpperCAmelCase__ : str = torch.zeros(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
UpperCAmelCase__ : List[Any] = input_embeds
else:
UpperCAmelCase__ : Union[str, Any] = self.transformer.transformer.wte(UpperCamelCase_ )
for i in range(UpperCamelCase_ ):
UpperCAmelCase__ : Tuple = self.transformer(inputs_embeds=UpperCamelCase_ )
UpperCAmelCase__ : Dict = outputs.logits
UpperCAmelCase__ : List[Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
UpperCAmelCase__ : int = logits.softmax(-1 ).log()
if scores is None:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = logits.topk(UpperCamelCase_ , -1 )
UpperCAmelCase__ : List[Any] = generated.expand(UpperCamelCase_ , *generated.shape[1:] )
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
UpperCAmelCase__ : Dict = next_tokens
else:
UpperCAmelCase__ : Optional[int] = tokens.expand(UpperCamelCase_ , *tokens.shape[1:] )
UpperCAmelCase__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
UpperCAmelCase__ : str = -float(np.inf )
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : int = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
UpperCAmelCase__ : Optional[int] = scores_sum / seq_lengths[:, None]
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = scores_sum_average.view(-1 ).topk(UpperCamelCase_ , -1 )
UpperCAmelCase__ : List[Any] = next_tokens // scores_sum.shape[1]
UpperCAmelCase__ : str = seq_lengths[next_tokens_source]
UpperCAmelCase__ : str = next_tokens % scores_sum.shape[1]
UpperCAmelCase__ : Optional[Any] = next_tokens.unsqueeze(1 )
UpperCAmelCase__ : List[str] = tokens[next_tokens_source]
UpperCAmelCase__ : List[str] = torch.cat((tokens, next_tokens) , dim=1 )
UpperCAmelCase__ : Any = generated[next_tokens_source]
UpperCAmelCase__ : Tuple = scores_sum_average * seq_lengths
UpperCAmelCase__ : Tuple = is_stopped[next_tokens_source]
UpperCAmelCase__ : List[Any] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
UpperCAmelCase__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
UpperCAmelCase__ : List[Any] = is_stopped + next_tokens.eq(UpperCamelCase_ ).squeeze()
if is_stopped.all():
break
UpperCAmelCase__ : Dict = scores / seq_lengths
UpperCAmelCase__ : Optional[Any] = scores.argsort(descending=UpperCamelCase_ )
# tokens tensors are already padded to max_seq_length
UpperCAmelCase__ : Dict = [tokens[i] for i in order]
UpperCAmelCase__ : Optional[Any] = torch.stack(UpperCamelCase_ , dim=0 )
UpperCAmelCase__ : List[str] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
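# Editor-added standalone sketch (illustrative values) of the
# length-normalised beam scoring used in generate_beam above: running
# log-prob sums are divided by per-beam sequence lengths before topk,
# so longer beams are not unfairly penalised.
_scores_sum = torch.tensor([[-1.2, -3.0], [-0.5, -2.2]])
_seq_lengths = torch.tensor([2.0, 3.0])
print(_scores_sum / _seq_lengths[:, None])  # per-token average log-probs
print((_scores_sum / _seq_lengths[:, None]).view(-1).topk(2, -1))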
| 254 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _A( ode_func , ya , xa , step_size , x_end ):
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # Euler predictor step, then the trapezoidal corrector (Heun's method).
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
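# Editor-added usage sketch for the Heun integrator above: y' = y with
# y(0) = 1 on [0, 1], so the last entry approximates e.
def _g(x, y):
    return y
print(_A(_g, 1.0, 0.0, 0.001, 1.0)[-1])  # ~2.718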
| 363 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __UpperCAmelCase (PretrainedConfig ):
'''simple docstring'''
_UpperCamelCase : Tuple = 'gpt_neo'
_UpperCamelCase : Optional[Any] = ['past_key_values']
_UpperCamelCase : int = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , snake_case_=50_257 , snake_case_=2_048 , snake_case_=2_048 , snake_case_=24 , snake_case_=[[["global", "local"], 12]] , snake_case_=16 , snake_case_=None , snake_case_=256 , snake_case_="gelu_new" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_=True , snake_case_=50_256 , snake_case_=50_256 , **snake_case_ , ):
'''simple docstring'''
A__ : int = vocab_size
A__ : Optional[Any] = max_position_embeddings
A__ : int = hidden_size
A__ : List[Any] = num_layers
A__ : Any = num_heads
A__ : List[str] = intermediate_size
A__ : Dict = window_size
A__ : Optional[int] = activation_function
A__ : Optional[int] = resid_dropout
A__ : List[str] = embed_dropout
A__ : str = attention_dropout
A__ : List[str] = classifier_dropout
A__ : str = layer_norm_epsilon
A__ : str = initializer_range
A__ : Any = use_cache
A__ : List[str] = bos_token_id
A__ : Any = eos_token_id
A__ : Optional[Any] = attention_types
A__ : List[Any] = self.expand_attention_types_params(snake_case_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
@staticmethod
def lowerCamelCase ( snake_case_ ):
'''simple docstring'''
A__ : Optional[Any] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _A( input , dimension , size , step ):
    # Custom torch.Tensor.unfold implementation (ONNX-exportable): cuts
    # `size`-long windows along `dimension` every `step` elements.
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def _A( lowerCAmelCase , lowerCAmelCase ):
import torch
A__ : Optional[Any] = torch.arange(1 , lowerCAmelCase )
A__ : int = torch.remainder(lowerCAmelCase , lowerCAmelCase )
A__ : str = remainders == 0
A__ : Tuple = candidates[divisor_indices]
A__ : Any = torch.max(lowerCAmelCase )
return largest_divisor, torch.div(lowerCAmelCase , lowerCAmelCase , rounding_mode="""floor""" )
class __UpperCAmelCase (OnnxConfigWithPast ):
'''simple docstring'''
@property
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : Any = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction="""inputs""" )
A__ : str = {0: """batch""", 1: """past_sequence + sequence"""}
else:
A__ : Optional[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._config.num_heads
def lowerCamelCase ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
'''simple docstring'''
A__ : Optional[int] = super(snake_case_ , self ).generate_dummy_inputs(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
# We need to order the input in the way they appears in the forward()
A__ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A__ , A__ : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
A__ : Any = seqlen + 2
A__ : Any = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A__ : List[Any] = [
(torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) for _ in range(self.num_layers )
]
A__ : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
A__ : List[Any] = ordered_inputs["""attention_mask"""].dtype
A__ : Optional[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_ )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 13
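# Editor-added usage sketch for the second module-level helper above: for a
# sequence of length 12 and window size 8 it picks block length 6, giving
# 2 blocks.
import torch
print(_A(torch.tensor(12), 8))  # (tensor(6), tensor(2))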
| 363 | 1 |
from datetime import datetime as dt
import os
from github import Github
a_ = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def a__ ( ):
__lowerCamelCase = Github(os.environ['''GITHUB_TOKEN'''] )
__lowerCamelCase = g.get_repo('''huggingface/transformers''' )
__lowerCamelCase = repo.get_issues(state='''open''' )
for issue in open_issues:
        __lowerCamelCase = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
__lowerCamelCase = comments[0] if len(_UpperCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
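# Editor-added sketch of the staleness windows above (the dates are made
# up): an issue updated 25 days ago and opened 40 days ago satisfies the
# 23-days-inactive / 30-days-old rule for the stale comment.
_now, _updated, _created = dt(2024, 3, 1), dt(2024, 2, 5), dt(2024, 1, 21)
print((_now - _updated).days > 23 and (_now - _created).days >= 30)  # True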
| 622 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''永和服装饰品有限公司,今天天气非常好'''
__lowerCamelCase = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
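# Editor-added usage sketch (left commented out: it needs network access and
# the rjieba package, matching the require_rjieba marker above):
# tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base')
# print(tokenizer.tokenize('永和服装饰品有限公司,今天天气非常好'))
# expected: ['永和', '服装', '饰品', '有限公司', ',', '今', '天', '天', '气', '非常', '好']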
| 622 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __UpperCAmelCase ( PipelineTool ):
"""simple docstring"""
_lowerCamelCase = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
_lowerCamelCase = """CIDAS/clipseg-rd64-refined"""
_lowerCamelCase = """image_segmenter"""
_lowerCamelCase = CLIPSegForImageSegmentation
_lowerCamelCase = ["""image""", """text"""]
_lowerCamelCase = ["""image"""]
def __init__( self , *__A , **__A ):
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
    def snake_case_ ( self , image , label ):
        return self.pre_processor(text=[label] , images=[image] , padding="""max_length""" , return_tensors="""pt""" )
    def snake_case_ ( self , inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def snake_case_ ( self , outputs ):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
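# Editor-added standalone sketch of the decode step above: logits are
# thresholded at zero into a binary mask, then scaled to an 8-bit image.
_logits = np.array([[-1.3, 0.2], [2.0, -0.5]])
_logits[_logits <= 0] = 0
_logits[_logits > 0] = 1
print((_logits * 255).astype(np.uint8))  # [[0 255] [255 0]] up to spacing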
| 99 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase: Tuple =logging.get_logger(__name__)
lowerCAmelCase: int =[
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key( k ) -> str:
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name ,hf_name )
    if k.startswith("""encoder""" ):
        k = k.replace(""".attn""" ,""".self_attn""" )
        k = k.replace("""norm1""" ,"""self_attn_layer_norm""" )
        k = k.replace("""norm2""" ,"""final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        k = k.replace("""norm1""" ,"""self_attn_layer_norm""" )
        k = k.replace("""norm2""" ,"""encoder_attn_layer_norm""" )
        k = k.replace("""norm3""" ,"""final_layer_norm""" )
    return k
def rename_layernorm_keys( sd ) -> None:
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("""layernorm_embedding""" ,"""layer_norm""" )
        assert new_k not in sd
        sd[new_k] = v
lowerCAmelCase: Union[str, Any] =["START"]
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path ,pytorch_dump_folder_path ,config_json_path ) -> None:
    model = torch.load(checkpoint_path ,map_location="""cpu""" )
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping ,strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase: Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
lowerCAmelCase: str =parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
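# Editor-added trace of rename_state_dict_key above on an editor-chosen key:
#   'encoder.attention.q_lin.weight'
#   -> 'encoder.attn.q_proj.weight'       (PATTERNS table)
#   -> 'encoder.self_attn.q_proj.weight'  (encoder-specific .attn fixup)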
| 607 | 0 |
import numpy as np
def _a ( vector ):
"""simple docstring"""
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
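# Editor-added check: the logistic form above is algebraically identical to
# np.tanh, since tanh(x) = 2 / (1 + exp(-2x)) - 1.
print(np.allclose(_a(np.array([-1.0, 0.0, 1.0])), np.tanh([-1.0, 0.0, 1.0])))  # True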
| 429 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _a ( TrainingArguments ):
_lowercase : bool = field(default=UpperCamelCase__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
_lowercase : bool = field(
default=UpperCamelCase__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
_lowercase : Optional[int] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
_lowercase : Optional[int] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
_lowercase : Optional[Union[str, Path, GenerationConfig]] = field(
default=UpperCamelCase__ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ = super().to_dict()
for k, v in d.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = v.to_dict()
return d
| 429 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()
def snake_case_ ( A_ : int, A_ : str, A_ : LevitConfig, A_ : Path, A_ : bool = True ):
'''simple docstring'''
print(F'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
_lowerCamelCase : int = timm.create_model('''levit_128s''', pretrained=A_ )
else:
_lowerCamelCase : Tuple = timm.create_model('''levit_128''', pretrained=A_ )
if hidden_sizes == 1_92:
_lowerCamelCase : List[str] = timm.create_model('''levit_192''', pretrained=A_ )
if hidden_sizes == 2_56:
_lowerCamelCase : Union[str, Any] = timm.create_model('''levit_256''', pretrained=A_ )
if hidden_sizes == 3_84:
_lowerCamelCase : Union[str, Any] = timm.create_model('''levit_384''', pretrained=A_ )
from_model.eval()
_lowerCamelCase : Any = LevitForImageClassificationWithTeacher(A_ ).eval()
_lowerCamelCase : int = OrderedDict()
_lowerCamelCase : Any = from_model.state_dict()
_lowerCamelCase : List[str] = list(from_model.state_dict().keys() )
_lowerCamelCase : List[str] = list(our_model.state_dict().keys() )
print(len(A_ ), len(A_ ) )
for i in range(len(A_ ) ):
_lowerCamelCase : Union[str, Any] = weights[og_keys[i]]
our_model.load_state_dict(A_ )
_lowerCamelCase : Optional[int] = torch.randn((2, 3, 2_24, 2_24) )
_lowerCamelCase : Union[str, Any] = from_model(A_ )
_lowerCamelCase : Optional[Any] = our_model(A_ ).logits
assert torch.allclose(A_, A_ ), "The model logits don't match the original one."
_lowerCamelCase : int = name
print(A_ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
_lowerCamelCase : int = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'''Pushed {checkpoint_name}''' )
def snake_case_ ( A_ : Path, A_ : str = None, A_ : bool = True ):
'''simple docstring'''
_lowerCamelCase : Dict = '''imagenet-1k-id2label.json'''
_lowerCamelCase : Dict = 10_00
_lowerCamelCase : Union[str, Any] = (1, num_labels)
_lowerCamelCase : Tuple = '''huggingface/label-files'''
_lowerCamelCase : Any = num_labels
_lowerCamelCase : List[Any] = json.load(open(hf_hub_download(A_, A_, repo_type='''dataset''' ), '''r''' ) )
_lowerCamelCase : List[str] = {int(A_ ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : Tuple = {v: k for k, v in idalabel.items()}
_lowerCamelCase : int = partial(A_, num_labels=A_, idalabel=A_, labelaid=A_ )
_lowerCamelCase : Optional[int] = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
_lowerCamelCase : Any = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name], A_, names_to_config[model_name], A_, A_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name], A_, A_, A_, A_ )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 83 |
def solution( n : int = 2_00_00_00 ) -> int:
    """simple docstring"""
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
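# Editor-added check: for the small bound 10 the primes below it are
# 2, 3, 5 and 7, so the sieve returns 17.
assert solution(10) == 17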
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 32 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : Optional[int] = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
_a : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
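# Editor-added note: with this _LazyModule pattern the package imports
# cheaply; the torch/TF submodules registered above are only loaded the
# first time one of their attributes (e.g. CTRLModel) is actually accessed.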
| 571 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ ( ProcessorMixin ):
"""simple docstring"""
A = ['''image_processor''', '''tokenizer''']
A = '''ViltImageProcessor'''
A = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ):
__lowerCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCAmelCase , )
__lowerCamelCase = kwargs.pop("""feature_extractor""" )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase = self.image_processor
def __call__( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ):
__lowerCamelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
# add pixel_values + pixel_mask
__lowerCamelCase = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase )
encoding.update(UpperCAmelCase )
return encoding
def lowerCamelCase_ ( self , *UpperCAmelCase , **UpperCAmelCase ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase_ ( self , *UpperCAmelCase , **UpperCAmelCase ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def lowerCamelCase_ ( self ):
__lowerCamelCase = self.tokenizer.model_input_names
__lowerCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase_ ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase , )
return self.image_processor_class
@property
def lowerCamelCase_ ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCAmelCase , )
return self.image_processor
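# Editor-added usage sketch (left commented out; the checkpoint name is an
# illustrative assumption, and loading it downloads pretrained files):
# processor = UpperCamelCase_.from_pretrained('dandelin/vilt-b32-finetuned-vqa')
# encoding = processor(images=image, text='How many cats?', return_tensors='pt')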
| 571 | 1 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def lowerCAmelCase_ ( __lowerCamelCase ):
return x + 2
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Tuple ) -> Any:
__snake_case : Optional[Any] = "x = 3"
__snake_case : Any = {}
__snake_case : Optional[int] = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
assert result == 3
self.assertDictEqual(lowerCamelCase , {"x": 3} )
__snake_case : str = "x = y"
__snake_case : List[Any] = {"y": 5}
__snake_case : Any = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 5, "y": 5} )
def __snake_case ( self : Any ) -> List[str]:
__snake_case : int = "y = add_two(x)"
__snake_case : Any = {"x": 3}
__snake_case : str = evaluate(lowerCamelCase , {"add_two": add_two} , state=lowerCamelCase )
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
__snake_case : str = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
assert result is None
assert "tried to execute add_two" in out.out
def __snake_case ( self : Dict ) -> str:
__snake_case : str = "x = 3"
__snake_case : List[Any] = {}
__snake_case : List[Any] = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
assert result == 3
self.assertDictEqual(lowerCamelCase , {"x": 3} )
def __snake_case ( self : Optional[int] ) -> List[Any]:
__snake_case : Union[str, Any] = "test_dict = {'x': x, 'y': add_two(x)}"
__snake_case : Tuple = {"x": 3}
__snake_case : List[Any] = evaluate(lowerCamelCase , {"add_two": add_two} , state=lowerCamelCase )
self.assertDictEqual(lowerCamelCase , {"x": 3, "y": 5} )
self.assertDictEqual(lowerCamelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __snake_case ( self : Optional[int] ) -> int:
__snake_case : int = "x = 3\ny = 5"
__snake_case : Optional[int] = {}
__snake_case : List[str] = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 3, "y": 5} )
def __snake_case ( self : Dict ) -> Tuple:
__snake_case : List[Any] = "text = f'This is x: {x}.'"
__snake_case : List[Any] = {"x": 3}
__snake_case : List[str] = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(lowerCamelCase , {"x": 3, "text": "This is x: 3."} )
def __snake_case ( self : Any ) -> Dict:
__snake_case : List[str] = "if x <= 3:\n y = 2\nelse:\n y = 5"
__snake_case : Tuple = {"x": 3}
__snake_case : int = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(lowerCamelCase , {"x": 3, "y": 2} )
__snake_case : str = {"x": 8}
__snake_case : List[str] = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 8, "y": 5} )
def __snake_case ( self : int ) -> int:
__snake_case : Tuple = "test_list = [x, add_two(x)]"
__snake_case : List[str] = {"x": 3}
__snake_case : Any = evaluate(lowerCamelCase , {"add_two": add_two} , state=lowerCamelCase )
self.assertListEqual(lowerCamelCase , [3, 5] )
self.assertDictEqual(lowerCamelCase , {"x": 3, "test_list": [3, 5]} )
def __snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
__snake_case : Optional[int] = "y = x"
__snake_case : Any = {"x": 3}
__snake_case : Union[str, Any] = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
assert result == 3
self.assertDictEqual(lowerCamelCase , {"x": 3, "y": 3} )
def __snake_case ( self : Any ) -> Any:
__snake_case : Optional[Any] = "test_list = [x, add_two(x)]\ntest_list[1]"
__snake_case : str = {"x": 3}
__snake_case : Optional[int] = evaluate(lowerCamelCase , {"add_two": add_two} , state=lowerCamelCase )
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 3, "test_list": [3, 5]} )
__snake_case : str = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
__snake_case : Optional[Any] = {"x": 3}
__snake_case : Union[str, Any] = evaluate(lowerCamelCase , {"add_two": add_two} , state=lowerCamelCase )
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __snake_case ( self : Dict ) -> List[Any]:
__snake_case : Any = "x = 0\nfor i in range(3):\n x = i"
__snake_case : Union[str, Any] = {}
__snake_case : Any = evaluate(lowerCamelCase , {"range": range} , state=lowerCamelCase )
assert result == 2
self.assertDictEqual(lowerCamelCase , {"x": 2, "i": 2} )
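# Editor-added summary of the behaviour exercised above: evaluate(code,
# tools, state=...) interprets a restricted Python subset, mutates `state`
# in place, and returns the value of the last statement, e.g. (sketch):
# state = {'x': 3}
# evaluate("y = x + 1\ny", {}, state=state)  # -> 4; state == {'x': 3, 'y': 4}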
| 81 |
import numpy as np
def A__ ( f , ya , xa , h , x_end ):
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # Classic fourth-order Runge-Kutta slopes.
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
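# Editor-added usage sketch for the Runge-Kutta routine above, integrating
# y' = y from y(0) = 1 over [0, 1]; the final value approximates e.
def _f(x, y):
    return y
print(A__(_f, 1.0, 0.0, 0.01, 1.0)[-1])  # ~2.71828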
| 64 | 0 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__snake_case = logging.get_logger(__name__)
def a ( __a=None , __a=None ) -> Any:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__a )
@dataclass
class lowercase :
"""simple docstring"""
_a = list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
_a = list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
_a = list_field(
default=[8, 32, 1_28, 5_12] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
_a = field(
default=A__ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
_a = field(
default=A__ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
_a = field(
default=A__ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
_a = field(default=A__ , metadata={'help': 'Use FP16 to accelerate inference.'} )
_a = field(default=A__ , metadata={'help': 'Benchmark training of model'} )
_a = field(default=A__ , metadata={'help': 'Verbose memory tracing'} )
_a = field(
default=A__ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
_a = field(
default=A__ , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
_a = field(default=A__ , metadata={'help': 'Trace memory line by line'} )
_a = field(default=A__ , metadata={'help': 'Save result to a CSV file'} )
_a = field(default=A__ , metadata={'help': 'Save all print statements in a log file'} )
_a = field(default=A__ , metadata={'help': 'Whether to print environment information'} )
_a = field(
default=A__ , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
_a = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
_a = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
_a = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
_a = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
_a = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving environment information.'} , )
_a = field(
default=f'''log_{round(time() )}.csv''' , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
_a = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
_a = field(
default=A__ , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
warnings.warn(
F'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
            return True
| 280 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def a ( __a ) -> Optional[int]:
'''simple docstring'''
random.seed(__a )
np.random.seed(__a )
torch.manual_seed(__a )
torch.cuda.manual_seed_all(__a )
# ^^ safe to call this function even if cuda is not available
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = 0.9999 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = 0 , UpperCamelCase_ = False , UpperCamelCase_ = 1.0 , UpperCamelCase_ = 2 / 3 , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
if isinstance(UpperCamelCase_ , torch.nn.Module ):
UpperCamelCase__ :Optional[Any] = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ , )
UpperCamelCase__ :Optional[int] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
UpperCamelCase__ :str = True
if kwargs.get('''max_value''' , UpperCamelCase_ ) is not None:
UpperCamelCase__ :List[str] = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
UpperCamelCase__ :int = kwargs['''max_value''']
if kwargs.get('''min_value''' , UpperCamelCase_ ) is not None:
UpperCamelCase__ :Union[str, Any] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
UpperCamelCase__ :Any = kwargs['''min_value''']
UpperCamelCase__ :Optional[int] = list(UpperCamelCase_ )
UpperCamelCase__ :Tuple = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , UpperCamelCase_ ) is not None:
UpperCamelCase__ :str = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
self.to(device=kwargs['''device'''] )
UpperCamelCase__ :Union[str, Any] = None
UpperCamelCase__ :List[Any] = decay
UpperCamelCase__ :List[str] = min_decay
UpperCamelCase__ :Optional[int] = update_after_step
UpperCamelCase__ :int = use_ema_warmup
UpperCamelCase__ :Any = inv_gamma
UpperCamelCase__ :Union[str, Any] = power
UpperCamelCase__ :Union[str, Any] = 0
UpperCamelCase__ :Dict = None # set in `step()`
UpperCamelCase__ :List[Any] = model_cls
UpperCamelCase__ :Optional[Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Tuple = model_cls.load_config(UpperCamelCase_ , return_unused_kwargs=UpperCamelCase_ )
UpperCamelCase__ :List[str] = model_cls.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :str = cls(model.parameters() , model_cls=UpperCamelCase_ , model_config=model.config )
ema_model.load_state_dict(UpperCamelCase_ )
return ema_model
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
UpperCamelCase__ :Optional[Any] = self.model_cls.from_config(self.model_config )
UpperCamelCase__ :Optional[int] = self.state_dict()
state_dict.pop('''shadow_params''' , UpperCamelCase_ )
model.register_to_config(**UpperCamelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
UpperCamelCase__ :Dict = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
UpperCamelCase__ :int = (1 + step) / (10 + step)
UpperCamelCase__ :Any = min(UpperCamelCase_ , self.decay )
# make sure decay is not smaller than min_decay
UpperCamelCase__ :Dict = max(UpperCamelCase_ , self.min_decay )
return cur_decay_value
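    # Editor-added note on get_decay above: without warmup the decay ramps
    # as (1 + step) / (10 + step), e.g. step 0 -> 0.1, step 90 -> 0.91,
    # before being clamped between min_decay and decay.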
@torch.no_grad()
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if isinstance(UpperCamelCase_ , torch.nn.Module ):
UpperCamelCase__ :Optional[Any] = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ , )
UpperCamelCase__ :str = parameters.parameters()
UpperCamelCase__ :Dict = list(UpperCamelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
UpperCamelCase__ :Any = self.get_decay(self.optimization_step )
UpperCamelCase__ :Tuple = decay
UpperCamelCase__ :List[Any] = 1 - decay
UpperCamelCase__ :Optional[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , UpperCamelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
UpperCamelCase__ :Dict = deepspeed.zero.GatheredParameters(UpperCamelCase_ , modifier_rank=UpperCamelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = list(UpperCamelCase_ )
for s_param, param in zip(self.shadow_params , UpperCamelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self , UpperCamelCase_=None , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :Tuple = [
p.to(device=UpperCamelCase_ , dtype=UpperCamelCase_ ) if p.is_floating_point() else p.to(device=UpperCamelCase_ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[str] = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , UpperCamelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
UpperCamelCase__ :Optional[Any] = None
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = copy.deepcopy(UpperCamelCase_ )
UpperCamelCase__ :Dict = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
UpperCamelCase__ :Union[str, Any] = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , UpperCamelCase_ ):
raise ValueError('''Invalid min_decay''' )
UpperCamelCase__ :Union[str, Any] = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , UpperCamelCase_ ):
raise ValueError('''Invalid optimization_step''' )
UpperCamelCase__ :List[Any] = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , UpperCamelCase_ ):
raise ValueError('''Invalid update_after_step''' )
UpperCamelCase__ :List[str] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , UpperCamelCase_ ):
raise ValueError('''Invalid use_ema_warmup''' )
UpperCamelCase__ :Optional[int] = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
UpperCamelCase__ :str = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
UpperCamelCase__ :Tuple = state_dict.get('''shadow_params''' , UpperCamelCase_ )
if shadow_params is not None:
UpperCamelCase__ :Dict = shadow_params
if not isinstance(self.shadow_params , UpperCamelCase_ ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(UpperCamelCase_ , torch.Tensor ) for p in self.shadow_params ):
            raise ValueError('''shadow_params must all be Tensors''' )
| 280 | 1 |