| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81–54k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # scrape the title and rating cells of the Top 250 chart
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
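# Usage sketch (added for illustration; this hits the live IMDb site, and the
# page markup may have changed since the scraper was written):
# >>> movies = get_imdb_top_250_movies()
# >>> next(iter(movies.items()))  # e.g. a (title, rating) pair such as ('The Shawshank Redemption', 9.2)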
| 683 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings that differ in at most one position.

    Returns the merged string (the differing bit replaced by '_'),
    or False if they differ in more than one position.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge adjacent minterms until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the two terms merge; mark both as combined
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                # never merged with anything -> prime implicant
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether an implicant covers a minterm: they must differ in
    exactly `count` positions (the implicant's '_' wildcards)."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one implicant makes that implicant essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
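# Non-interactive sketch of the same pipeline (the minterm set is illustrative):
# binary = decimal_to_binary(3, [0, 1, 2, 5, 6, 7])
# prime_implicants = check(binary)
# chart = prime_implicant_chart(prime_implicants, binary)
# essential = selection(chart, prime_implicants)
# print(essential)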
| 683 | 1 |
__all__ = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
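# Hypothetical usage sketch of the re-exported API (the URL is illustrative):
# from datasets.download import DownloadConfig, DownloadManager
# dl_manager = DownloadManager(download_config=DownloadConfig())
# local_path = dl_manager.download("https://example.com/data.csv")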
| 683 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
if isinstance(snake_case__ , snake_case__) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
if load_in_4bit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
model.is_loaded_in_4bit = load_in_4bit
model.is_loaded_in_8bit = load_in_8bit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(snake_case__):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(snake_case__ , snake_case__):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
if isinstance(snake_case__ , snake_case__):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_4bit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
lowerCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase_ : Optional[int] = []
current_key_name.append(snake_case__)
if isinstance(snake_case__ , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase_ : Optional[int] = ".".join(snake_case__)
lowerCAmelCase_ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase_ : List[Any] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_8bit:
    bnb_module = bnb.nn.Linear8bitLt(
        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
elif bnb_quantization_config.load_in_4bit:
    bnb_module = bnb.nn.Linear4bit(
        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
else:
    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
bnb_module.weight.data = module.weight.data
if module.bias is not None:
    bnb_module.bias.data = module.bias.data
bnb_module.requires_grad_(False)
setattr(model, name, bnb_module)
has_been_replaced = True
if len(list(module.children())) > 0:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
def get_keys_to_not_convert(model):
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase_ : List[Any] = deepcopy(snake_case__) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase_ : Dict = find_tied_parameters(snake_case__)
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
lowerCAmelCase_ : Optional[Any] = sum(snake_case__ , [])
lowerCAmelCase_ : List[Any] = len(snake_case__) > 0
# Check if it is a base model
lowerCAmelCase_ : List[str] = False
if hasattr(snake_case__ , "base_model_prefix"):
lowerCAmelCase_ : Tuple = not hasattr(snake_case__ , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase_ : Union[str, Any] = list(model.named_children())
lowerCAmelCase_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase_ : Any = set(snake_case__) - set(snake_case__)
lowerCAmelCase_ : Tuple = list(set(snake_case__)) + list(snake_case__)
# remove ".weight" from the keys
lowerCAmelCase_ : List[str] = [".weight", ".bias"]
lowerCAmelCase_ : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase_ : str = name.replace(snake_case__ , "")
filtered_module_names.append(snake_case__)
return filtered_module_names
def has_4bit_bnb_layers(model):
    # check whether the model contains any `bnb.nn.Linear4bit` layer
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(snake_case__ , snake_case__ , 0 , dtype=snake_case__ , value=snake_case__)
lowerCAmelCase_ : str = param_name
lowerCAmelCase_ : Tuple = model
if "." in tensor_name:
lowerCAmelCase_ : Dict = tensor_name.split(".")
for split in splits[:-1]:
lowerCAmelCase_ : Any = getattr(snake_case__ , snake_case__)
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''')
lowerCAmelCase_ : Union[str, Any] = new_module
lowerCAmelCase_ : Any = splits[-1]
# offload weights
lowerCAmelCase_ : List[Any] = False
offload_weight(module._parameters[tensor_name] , snake_case__ , snake_case__ , index=snake_case__)
if hasattr(module._parameters[tensor_name] , "SCB"):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__ , )
else:
offload_weight(snake_case__ , snake_case__ , snake_case__ , index=snake_case__)
offload_weight(snake_case__ , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__)
set_module_tensor_to_device(snake_case__ , snake_case__ , "meta" , dtype=snake_case__ , value=torch.empty(*param.size()))
| 683 | 1 |
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore string search using the bad-character heuristic."""

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for the window
        starting at `current_pos`, or -1 if the window matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns the index positions of matches
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
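# For the example above, "AB" occurs at indices 0 and 3 of "ABAABA", so the
# script prints: Pattern found in following positions: [0, 3]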
| 683 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor for CLAP: computes mel-filter-bank features from raw audio."""

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = spectrogram(
lowerCAmelCase__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCAmelCase__ ,log_mel="dB" ,)
return log_mel_spectrogram.T
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
# randomly choose index for each part
lowerCAmelCase_ : str = np.random.choice(ranges[0] )
lowerCAmelCase_ : Optional[Any] = np.random.choice(ranges[1] )
lowerCAmelCase_ : Any = np.random.choice(ranges[2] )
lowerCAmelCase_ : str = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase_ : List[str] = torch.tensor(mel[None, None, :] )
lowerCAmelCase_ : List[Any] = torch.nn.functional.interpolate(
lowerCAmelCase__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy()
lowerCAmelCase_ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase_ : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase_ : str = len(lowerCAmelCase__ ) - max_length
lowerCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 )
lowerCAmelCase_ : Dict = waveform[idx : idx + max_length]
lowerCAmelCase_ : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : str = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase_ : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase_ : Dict = np.stack([mel, mel, mel, mel] ,axis=0 )
lowerCAmelCase_ : int = False
else:
lowerCAmelCase_ : str = self._random_mel_fusion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase_ : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase_ : List[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : int = np.stack(np.tile(lowerCAmelCase__ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase_ : Optional[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = np.stack(np.tile(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = np.pad(lowerCAmelCase__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 )
if truncation == "fusion":
lowerCAmelCase_ : int = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
lowerCAmelCase_ : str = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : str = None ,lowerCAmelCase__ : Optional[str] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCAmelCase__ : List[Any] ,) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ : List[str] = truncation if truncation is not None else self.truncation
lowerCAmelCase_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : Dict = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : Dict = is_batched_numpy or (
isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : List[str] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCAmelCase_ : Tuple = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : Any = [np.asarray(lowerCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase_ : Optional[Any] = [
self._get_input_mel(lowerCAmelCase__ ,max_length if max_length else self.nb_max_samples ,lowerCAmelCase__ ,lowerCAmelCase__ )
for waveform in raw_speech
]
lowerCAmelCase_ : str = []
lowerCAmelCase_ : str = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase__ )
is_longer.append(lowerCAmelCase__ )
if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase_ : Any = np.random.randint(0 ,len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Dict = True
if isinstance(input_mel[0] ,lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase_ : List[Any] = [[longer] for longer in is_longer]
lowerCAmelCase_ : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
lowerCAmelCase_ : Dict = BatchFeature(lowerCAmelCase__ )
if return_tensors is not None:
lowerCAmelCase_ : List[str] = input_features.convert_to_tensors(lowerCAmelCase__ )
return input_features
| 683 | 1 |
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
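# Quick sanity check: 3 * 4 == 12 ≡ 1 (mod 11), so 4 is the inverse of 3 mod 11.
assert find_mod_inverse(3, 11) == 4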
| 683 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # (the swap count of 10 matches the list length used in main())
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        ))
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            ))
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        ))
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 683 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig):
    """Configuration class for the BLOOM family of models."""

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")
def __init__( self : List[Any] ,lowerCAmelCase__ : PretrainedConfig ,lowerCAmelCase__ : str = "default" ,lowerCAmelCase__ : List[PatchingSpec] = None ,lowerCAmelCase__ : bool = False ,) -> str:
'''simple docstring'''
super().__init__(lowerCAmelCase__ ,task=lowerCAmelCase__ ,patching_specs=lowerCAmelCase__ ,use_past=lowerCAmelCase__ )
if not getattr(self._config ,"pad_token_id" ,lowerCAmelCase__ ):
# TODO: how to do that better?
lowerCAmelCase_ : str = 0
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(lowerCAmelCase__ ,direction="inputs" ,inverted_values_shape=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = {0: "batch", 1: "past_sequence + sequence"}
else:
lowerCAmelCase_ : int = {0: "batch", 1: "sequence"}
return common_inputs
@property
def UpperCAmelCase_ ( self : List[Any] ) -> int:
'''simple docstring'''
return self._config.n_layer
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
'''simple docstring'''
return self._config.n_head
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
'''simple docstring'''
return 1e-3
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : "PreTrainedTokenizer" ,lowerCAmelCase__ : int = -1 ,lowerCAmelCase__ : int = -1 ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : Optional["TensorType"] = None ,) -> Mapping[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = super(lowerCAmelCase__ ,self ).generate_dummy_inputs(
lowerCAmelCase__ ,batch_size=lowerCAmelCase__ ,seq_length=lowerCAmelCase__ ,is_pair=lowerCAmelCase__ ,framework=lowerCAmelCase__ )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase_ : int = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : Dict = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : List[str] = seqlen + 2
lowerCAmelCase_ : Optional[int] = self._config.hidden_size // self.num_attention_heads
lowerCAmelCase_ : Union[str, Any] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
lowerCAmelCase_ : Union[str, Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
lowerCAmelCase_ : Optional[Any] = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(self.num_layers )
]
lowerCAmelCase_ : List[str] = common_inputs["attention_mask"]
if self.use_past:
lowerCAmelCase_ : Dict = ordered_inputs["attention_mask"].dtype
lowerCAmelCase_ : Tuple = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(lowerCAmelCase__ ,lowerCAmelCase__ ,dtype=lowerCAmelCase__ )] ,dim=1 )
return ordered_inputs
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return 13
| 683 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
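# Worked example (the classic Healthy/Fever HMM from the Wikipedia article on
# the Viterbi algorithm); the decoded sequence is ['Healthy', 'Healthy', 'Fever']:
# observations = ["normal", "cold", "dizzy"]
# states = ["Healthy", "Fever"]
# start_p = {"Healthy": 0.6, "Fever": 0.4}
# trans_p = {
#     "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#     "Fever": {"Healthy": 0.4, "Fever": 0.6},
# }
# emit_p = {
#     "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#     "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
# }
# print(viterbi(observations, states, start_p, trans_p, emit_p))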
| 683 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowercase = logging.getLogger(__name__)
@dataclass
class __snake_case :
"""simple docstring"""
UpperCamelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCamelCase_ = field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
UpperCamelCase_ = field(default=snake_case__ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __snake_case :
"""simple docstring"""
UpperCamelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
UpperCamelCase_ = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def UpperCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome.")
lowerCAmelCase_ : Optional[Any] = import_module("tasks")
try:
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , model_args.task_type)
lowerCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''')
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , snake_case__)
# Set seed
set_seed(training_args.seed)
# Prepare CONLL-2003 task
lowerCAmelCase_ : str = token_classification_task.get_labels(data_args.labels)
lowerCAmelCase_ : Dict[int, str] = dict(enumerate(snake_case__))
lowerCAmelCase_ : List[Any] = len(snake_case__)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case__ , idalabel=snake_case__ , labelaid={label: i for i, label in enumerate(snake_case__)} , cache_dir=model_args.cache_dir , )
lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowerCAmelCase_ : Any = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCAmelCase_ : str = (
TokenClassificationDataset(
token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCAmelCase_ : Any = (
TokenClassificationDataset(
token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
preds = np.argmax(predictions, axis=2)
batch_size, seq_len = preds.shape
out_label_list = [[] for _ in range(batch_size)]
preds_list = [[] for _ in range(batch_size)]
for i in range(batch_size):
for j in range(seq_len):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
return preds_list, out_label_list
def compute_metrics(p: EvalPrediction) -> Dict:
preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
return {
"accuracy_score": accuracy_score(out_label_list, preds_list),
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
# Data collator
lowerCAmelCase_ : List[str] = DataCollatorWithPadding(snake_case__ , pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
lowerCAmelCase_ : List[Any] = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
lowerCAmelCase_ : Optional[int] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
lowerCAmelCase_ : str = trainer.evaluate()
lowerCAmelCase_ : Optional[Any] = os.path.join(training_args.output_dir , "eval_results.txt")
if trainer.is_world_process_zero():
with open(snake_case__ , "w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
logger.info(" %s = %s" , snake_case__ , snake_case__)
writer.write("%s = %s\n" % (key, value))
results.update(snake_case__)
# Predict
if training_args.do_predict:
lowerCAmelCase_ : Dict = TokenClassificationDataset(
token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = trainer.predict(snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Any = align_predictions(snake_case__ , snake_case__)
lowerCAmelCase_ : Union[str, Any] = os.path.join(training_args.output_dir , "test_results.txt")
if trainer.is_world_process_zero():
with open(snake_case__ , "w") as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , snake_case__ , snake_case__)
writer.write("%s = %s\n" % (key, value))
# Save predictions
lowerCAmelCase_ : List[str] = os.path.join(training_args.output_dir , "test_predictions.txt")
if trainer.is_world_process_zero():
with open(snake_case__ , "w") as writer:
with open(os.path.join(data_args.data_dir , "test.txt") , "r") as f:
token_classification_task.write_predictions_to_file(snake_case__ , snake_case__ , snake_case__)
return results
def UpperCamelCase ( snake_case__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
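# Typical invocation (paths and model name are illustrative):
# python run_ner.py --data_dir ./data --labels ./data/labels.txt \
#     --model_name_or_path bert-base-cased --output_dir ./out \
#     --max_seq_length 128 --do_train --do_eval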
| 683 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 683 | 1 |
import argparse
from collections import defaultdict
import yaml
_lowercase = '''docs/source/en/_toctree.yml'''
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[int] = defaultdict(snake_case__)
for doc in model_doc:
counts[doc["local"]] += 1
lowerCAmelCase_ : Any = [key for key, value in counts.items() if value > 1]
lowerCAmelCase_ : Union[str, Any] = []
for duplicate_key in duplicates:
lowerCAmelCase_ : Optional[int] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
if len(snake_case__) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others.")
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]})
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
# Sort
    return sorted(snake_case__ , key=lambda s: s["title"].lower())
def UpperCamelCase ( snake_case__=False):
with open(snake_case__ , encoding="utf-8") as f:
lowerCAmelCase_ : Dict = yaml.safe_load(f.read())
# Get to the API doc
lowerCAmelCase_ : Dict = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCAmelCase_ : Any = content[api_idx]["sections"]
# Then to the model doc
lowerCAmelCase_ : str = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowerCAmelCase_ : Any = api_doc[model_idx]["sections"]
lowerCAmelCase_ : List[Any] = [(idx, section) for idx, section in enumerate(snake_case__) if "sections" in section]
lowerCAmelCase_ : List[Any] = False
for idx, modality_doc in modalities_docs:
lowerCAmelCase_ : Any = modality_doc["sections"]
lowerCAmelCase_ : List[str] = clean_model_doc_toc(snake_case__)
if old_modality_doc != new_modality_doc:
lowerCAmelCase_ : Optional[Any] = True
if overwrite:
lowerCAmelCase_ : Optional[int] = new_modality_doc
if diff:
if overwrite:
lowerCAmelCase_ : Optional[Any] = model_doc
lowerCAmelCase_ : Tuple = api_doc
with open(snake_case__ , "w" , encoding="utf-8") as f:
f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__))
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this.")
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowercase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 683 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_lowercase = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
_lowercase = None
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file.")
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions.")
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout).")
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer.")
parser.add_argument(
"--na-prob-thresh" , "-t" , type=snake_case__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=snake_case__ , help="Save precision-recall curves to directory.")
parser.add_argument("--verbose" , "-v" , action="store_true")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : str = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : Dict = bool(qa["answers"]["text"])
return qid_to_has_ans
def UpperCamelCase ( snake_case__):
def remove_articles(snake_case__):
return ARTICLES_REGEX.sub(" " , snake_case__)
def white_space_fix(snake_case__):
return " ".join(text.split())
def remove_punc(snake_case__):
lowerCAmelCase_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(snake_case__):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case__))))
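# Minimal usage sketch for the normalizer above (the function is called
# normalize_answer in the original SQuAD script): it lower-cases, strips
# punctuation, drops articles, then collapses whitespace, e.g.
#     normalize_answer("The  Quick, Brown fox!")  # -> "quick brown fox"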
def UpperCamelCase ( snake_case__):
if not s:
return []
return normalize_answer(snake_case__).split()
def UpperCamelCase ( snake_case__ , snake_case__):
return int(normalize_answer(snake_case__) == normalize_answer(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = get_tokens(snake_case__)
lowerCAmelCase_ : Union[str, Any] = get_tokens(snake_case__)
lowerCAmelCase_ : Any = collections.Counter(snake_case__) & collections.Counter(snake_case__)
lowerCAmelCase_ : Dict = sum(common.values())
if len(snake_case__) == 0 or len(snake_case__) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
lowerCAmelCase_ : List[Any] = 1.0 * num_same / len(snake_case__)
lowerCAmelCase_ : int = 1.0 * num_same / len(snake_case__)
lowerCAmelCase_ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
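# Worked example for the token-level F1 above: gold = "new york city" and
# pred = "york city" share {"york", "city"}, so precision = 2/2 = 1.0,
# recall = 2/3, and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.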
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Tuple = {}
lowerCAmelCase_ : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : int = qa["id"]
                lowerCAmelCase_ : Any = [t for t in qa["answers"]["text"] if normalize_answer(t)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowerCAmelCase_ : Any = [""]
if qid not in preds:
print(F'''Missing prediction for {qid}''')
continue
lowerCAmelCase_ : Tuple = preds[qid]
# Take max over all gold answers
                lowerCAmelCase_ : Any = max(compute_exact(a , preds[qid]) for a in gold_answers)
                lowerCAmelCase_ : Optional[Any] = max(compute_fa(a , preds[qid]) for a in gold_answers)
return exact_scores, fa_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = {}
for qid, s in scores.items():
lowerCAmelCase_ : List[Any] = na_probs[qid] > na_prob_thresh
if pred_na:
lowerCAmelCase_ : List[str] = float(not qid_to_has_ans[qid])
else:
lowerCAmelCase_ : Union[str, Any] = s
return new_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None):
if not qid_list:
lowerCAmelCase_ : Any = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(fa_scores.values()) / total),
("total", total),
])
else:
lowerCAmelCase_ : Tuple = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
("total", total),
])
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
for k in new_eval:
lowerCAmelCase_ : Union[str, Any] = new_eval[k]
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
plt.step(snake_case__ , snake_case__ , color="b" , alpha=0.2 , where="post")
plt.fill_between(snake_case__ , snake_case__ , step="post" , alpha=0.2 , color="b")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(snake_case__)
plt.savefig(snake_case__)
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
    lowerCAmelCase_ : List[Any] = sorted(snake_case__ , key=lambda k: na_probs[k])
lowerCAmelCase_ : Dict = 0.0
lowerCAmelCase_ : int = 1.0
lowerCAmelCase_ : List[str] = 0.0
lowerCAmelCase_ : Tuple = [1.0]
lowerCAmelCase_ : Tuple = [0.0]
lowerCAmelCase_ : Dict = 0.0
for i, qid in enumerate(snake_case__):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCAmelCase_ : str = true_pos / float(i + 1)
lowerCAmelCase_ : Union[str, Any] = true_pos / float(snake_case__)
if i == len(snake_case__) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(snake_case__)
recalls.append(snake_case__)
if out_image:
plot_pr_curve(snake_case__ , snake_case__ , snake_case__ , snake_case__)
return {"ap": 100.0 * avg_prec}
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
if out_image_dir and not os.path.exists(snake_case__):
os.makedirs(snake_case__)
lowerCAmelCase_ : Any = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
lowerCAmelCase_ : Any = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_exact.png") , title="Precision-Recall curve for Exact Match score" , )
lowerCAmelCase_ : Dict = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_f1.png") , title="Precision-Recall curve for F1 score" , )
    lowerCAmelCase_ : Dict = {k: float(v) for k, v in qid_to_has_ans.items()}
lowerCAmelCase_ : str = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_oracle.png") , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(snake_case__ , snake_case__ , "pr_exact")
merge_eval(snake_case__ , snake_case__ , "pr_f1")
merge_eval(snake_case__ , snake_case__ , "pr_oracle")
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
if not qid_list:
return
lowerCAmelCase_ : Optional[Any] = [na_probs[k] for k in qid_list]
lowerCAmelCase_ : Dict = np.ones_like(snake_case__) / float(len(snake_case__))
plt.hist(snake_case__ , weights=snake_case__ , bins=20 , range=(0.0, 1.0))
plt.xlabel("Model probability of no-answer")
plt.ylabel("Proportion of dataset")
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(snake_case__ , F'''na_prob_hist_{name}.png'''))
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
lowerCAmelCase_ : str = num_no_ans
lowerCAmelCase_ : List[str] = cur_score
lowerCAmelCase_ : List[Any] = 0.0
    lowerCAmelCase_ : str = sorted(snake_case__ , key=lambda k: na_probs[k])
for i, qid in enumerate(snake_case__):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCAmelCase_ : Union[str, Any] = scores[qid]
else:
if preds[qid]:
lowerCAmelCase_ : List[Any] = -1
else:
lowerCAmelCase_ : List[str] = 0
cur_score += diff
if cur_score > best_score:
lowerCAmelCase_ : Optional[Any] = cur_score
lowerCAmelCase_ : Optional[int] = na_probs[qid]
return 100.0 * best_score / len(snake_case__), best_thresh
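# Note on the sweep above: it starts from the score of predicting no-answer
# for every question, then walks questions in order of increasing no-answer
# probability, flipping each to "answered" and keeping the threshold that
# maximizes the running score.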
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Dict = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = best_exact
lowerCAmelCase_ : List[str] = exact_thresh
lowerCAmelCase_ : Any = best_fa
lowerCAmelCase_ : List[str] = fa_thresh
def UpperCamelCase ( ):
with open(OPTS.data_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
lowerCAmelCase_ : List[Any] = dataset_json["data"]
with open(OPTS.pred_file) as f:
lowerCAmelCase_ : int = json.load(snake_case__)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
else:
lowerCAmelCase_ : List[Any] = {k: 0.0 for k in preds}
lowerCAmelCase_ : Tuple = make_qid_to_has_ans(snake_case__) # maps qid to True/False
lowerCAmelCase_ : Any = [k for k, v in qid_to_has_ans.items() if v]
lowerCAmelCase_ : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
lowerCAmelCase_ , lowerCAmelCase_ : Dict = get_raw_scores(snake_case__ , snake_case__)
lowerCAmelCase_ : str = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Dict = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__)
if has_ans_qids:
lowerCAmelCase_ : str = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "HasAns")
if no_ans_qids:
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "NoAns")
if OPTS.na_prob_file:
find_all_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , OPTS.out_image_dir)
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "hasAns")
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "noAns")
if OPTS.out_file:
with open(OPTS.out_file , "w") as f:
json.dump(snake_case__ , snake_case__)
else:
print(json.dumps(snake_case__ , indent=2))
if __name__ == "__main__":
_lowercase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 683 | 1 |
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : str = "" ,lowerCAmelCase__ : bool = False ) -> None:
'''simple docstring'''
lowerCAmelCase_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase_ : int = is_leaf
lowerCAmelCase_ : Optional[Any] = prefix
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ) -> tuple[str, str, str]:
'''simple docstring'''
lowerCAmelCase_ : Any = 0
for q, w in zip(self.prefix ,lowerCAmelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
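    # Worked example for match(): with self.prefix = "banana" and
    # word = "bandana" the shared part is "ban", so it returns
    # ("ban", "ana", "dana") -- (matching string, remaining prefix, remaining word).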
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : list[str] ) -> None:
'''simple docstring'''
for word in words:
self.insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
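        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as leaf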
if self.prefix == word:
lowerCAmelCase_ : Optional[Any] = True
        # Case 2: The node has no edge whose prefix matches the start of the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
elif word[0] not in self.nodes:
lowerCAmelCase_ : List[Any] = RadixNode(prefix=lowerCAmelCase__ ,is_leaf=lowerCAmelCase__ )
else:
lowerCAmelCase_ : Tuple = self.nodes[word[0]]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = incoming_node.match(
lowerCAmelCase__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
            # Case 4: The node prefix and the word only partially match
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase_ : Optional[int] = remaining_prefix
lowerCAmelCase_ : Optional[int] = self.nodes[matching_string[0]]
lowerCAmelCase_ : List[Any] = RadixNode(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = aux_node
if remaining_word == "":
lowerCAmelCase_ : List[str] = True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : int = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase_ : str = list(self.nodes.values() )[0]
lowerCAmelCase_ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase_ : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase_ : Optional[Any] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase_ : Tuple = list(incoming_node.nodes.values() )[0]
lowerCAmelCase_ : Union[str, Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase_ : str = merging_node.nodes
return True
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : int = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height ,self.prefix ," (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie ( ):
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
assert not root.find("bandanas")
assert not root.find("apps")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def UpperCamelCase ( ):
assert test_trie()
def main ( ):
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:" , words)
print("Tree:")
root.print_tree()
if __name__ == "__main__":
main()
| 683 |
from math import sqrt
def sum_of_divisors ( n):
    total = 0
    for i in range(1 , int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
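# Quick sanity checks: the proper divisors of 12 are 1, 2, 3, 4, 6, so
# sum_of_divisors(12) == 16; and for the classic amicable pair,
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220.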
def solution ( limit = 1_00_00):
    total = sum(
        i
        for i in range(1 , limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 683 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowercase = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
_lowercase = parser.parse_args()
_lowercase = '''cpu'''
_lowercase = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
_lowercase = '''path-to-your-trained-model'''
_lowercase = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase = pipe.to(device)
# to channels last
_lowercase = pipe.unet.to(memory_format=torch.channels_last)
_lowercase = pipe.vae.to(memory_format=torch.channels_last)
_lowercase = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowercase = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowercase = torch.randn(2, 4, 64, 64)
_lowercase = torch.rand(1) * 999
_lowercase = torch.randn(2, 77, 768)
_lowercase = (sample, timestep, encoder_hidden_status)
try:
_lowercase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowercase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowercase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowercase = 666
_lowercase = torch.Generator(device).manual_seed(seed)
_lowercase = {'''generator''': generator}
if args.steps is not None:
_lowercase = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowercase = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 683 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_lowercase = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
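# Note: the `_LazyModule` shim above keeps importing this package cheap -- the
# heavy torch/tf submodules are only imported when a name listed in
# `_import_structure` is first accessed, e.g.
#     from transformers.models.speech_to_text import Speech2TextConfig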
| 683 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'gpt_neox'
def __init__( self : str ,lowerCAmelCase__ : Union[str, Any]=5_04_32 ,lowerCAmelCase__ : Optional[int]=61_44 ,lowerCAmelCase__ : Union[str, Any]=44 ,lowerCAmelCase__ : int=64 ,lowerCAmelCase__ : Dict=2_45_76 ,lowerCAmelCase__ : Optional[int]="gelu" ,lowerCAmelCase__ : Union[str, Any]=0.25 ,lowerCAmelCase__ : Optional[int]=1_00_00 ,lowerCAmelCase__ : List[Any]=0.0 ,lowerCAmelCase__ : Optional[int]=0.0 ,lowerCAmelCase__ : List[str]=0.1 ,lowerCAmelCase__ : Union[str, Any]=20_48 ,lowerCAmelCase__ : Optional[Any]=0.02 ,lowerCAmelCase__ : List[str]=1e-5 ,lowerCAmelCase__ : List[Any]=True ,lowerCAmelCase__ : Any=0 ,lowerCAmelCase__ : Tuple=2 ,lowerCAmelCase__ : List[Any]=False ,lowerCAmelCase__ : Tuple=True ,lowerCAmelCase__ : int=None ,**lowerCAmelCase__ : Union[str, Any] ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(bos_token_id=lowerCAmelCase__ ,eos_token_id=lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = vocab_size
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Optional[Any] = num_hidden_layers
lowerCAmelCase_ : Dict = num_attention_heads
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : Any = hidden_act
lowerCAmelCase_ : str = rotary_pct
lowerCAmelCase_ : Tuple = rotary_emb_base
lowerCAmelCase_ : List[str] = attention_dropout
lowerCAmelCase_ : List[Any] = hidden_dropout
lowerCAmelCase_ : Optional[Any] = classifier_dropout
lowerCAmelCase_ : Union[str, Any] = initializer_range
lowerCAmelCase_ : Tuple = layer_norm_eps
lowerCAmelCase_ : List[Any] = use_cache
lowerCAmelCase_ : Optional[Any] = tie_word_embeddings
lowerCAmelCase_ : List[Any] = use_parallel_residual
lowerCAmelCase_ : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def UpperCAmelCase_ ( self : Any ) -> Dict:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,lowerCAmelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'''got {self.rope_scaling}''' )
lowerCAmelCase_ : Dict = self.rope_scaling.get("type" ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = self.rope_scaling.get("factor" ,lowerCAmelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
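# Hedged usage sketch: assuming the class above corresponds to transformers'
# GPTNeoXConfig, the validator accepts
#     GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
# and raises ValueError for an unknown scaling type or a factor that is not a
# float greater than 1.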
| 683 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
_lowercase = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
_lowercase = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCamelCase ( ):
    bs = (
        list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
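# Note: this is the standard GPT-2 byte-to-unicode table; every byte maps to a
# printable character so BPE never operates on raw control bytes. For example
# the space byte (0x20) maps to "Ġ" (U+0120), which is why GPT-2-style
# vocabularies show "Ġ" as a leading-space marker.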
def UpperCamelCase ( snake_case__):
    pairs = set()
    prev_char = snake_case__[0]
    for char in snake_case__[1:]:
        pairs.add((prev_char, char))
        prev_char = char
return pairs
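# Illustrative example: for the symbol tuple ("l", "o", "w") the helper above
# returns {("l", "o"), ("o", "w")} -- the adjacent pairs that BPE ranks and merges.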
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : str ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any]="replace" ,lowerCAmelCase__ : Dict="<s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : Optional[Any]="<s>" ,lowerCAmelCase__ : List[Any]="<unk>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : int="<mask>" ,lowerCAmelCase__ : Any=False ,**lowerCAmelCase__ : int ,) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : Dict = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[Any] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : List[Any] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[Any] = bytes_to_unicode()
lowerCAmelCase_ : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Dict = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Optional[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowerCAmelCase_ : Dict = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ : Dict = bigram
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
lowerCAmelCase_ : Optional[int] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ : Tuple = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ : Optional[Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = " ".join(lowerCAmelCase__ )
lowerCAmelCase_ : Any = word
return word
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Dict = []
for token in re.findall(self.pat ,lowerCAmelCase__ ):
lowerCAmelCase_ : List[str] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[Any] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowerCAmelCase_ : Tuple = 0
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowerCAmelCase_ : Optional[Any] = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
lowerCAmelCase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
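    # The layout built above is the RoBERTa-style format this tokenizer
    # inherits:
    #     single sequence:   <s> A </s>
    #     pair of sequences: <s> A </s></s> B </s>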
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowerCAmelCase_ : Union[str, Any] = " " + text
return (text, kwargs)
| 683 | 1 |
from manim import *
class __snake_case ( snake_case__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = Rectangle(height=0.5 ,width=0.5 )
lowerCAmelCase_ : int = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : int = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : List[Any] = VGroup(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : Optional[int] = Text("CPU" ,font_size=24 )
lowerCAmelCase_ : Union[str, Any] = Group(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0.5 ,aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = [mem.copy() for i in range(4 )]
lowerCAmelCase_ : Optional[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : Dict = Text("GPU" ,font_size=24 )
lowerCAmelCase_ : List[Any] = Group(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0.5 ,aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : Dict = Text("Model" ,font_size=24 )
lowerCAmelCase_ : Tuple = Group(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0.5 ,aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = []
for i, rect in enumerate(lowerCAmelCase__ ):
rect.set_stroke(lowerCAmelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowerCAmelCase_ : int = Rectangle(height=0.46 / 4 ,width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ ,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=lowerCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] ,direction=lowerCAmelCase__ ,buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] ,direction=lowerCAmelCase__ ,buff=0.0 )
self.add(lowerCAmelCase__ )
cpu_targs.append(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Union[str, Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : Optional[int] = Text("Loaded Checkpoint" ,font_size=24 )
lowerCAmelCase_ : Tuple = Group(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,aligned_edge=lowerCAmelCase__ ,buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowerCAmelCase_ : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ : List[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' ,font_size=18 ,)
blue_text.next_to(lowerCAmelCase__ ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
lowerCAmelCase_ : Dict = MarkupText(
f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ ) ,Write(lowerCAmelCase__ ) )
self.play(Write(lowerCAmelCase__ ,run_time=1 ) ,Create(lowerCAmelCase__ ,run_time=1 ) )
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : List[Any] = []
for i, rect in enumerate(lowerCAmelCase__ ):
lowerCAmelCase_ : Tuple = fill.copy().set_fill(lowerCAmelCase__ ,opacity=0.7 )
target.move_to(lowerCAmelCase__ )
first_animations.append(GrowFromCenter(lowerCAmelCase__ ,run_time=1 ) )
lowerCAmelCase_ : Tuple = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCAmelCase__ ,run_time=1.5 ) )
self.play(*lowerCAmelCase__ )
self.play(*lowerCAmelCase__ )
self.wait()
| 683 |
from collections.abc import Iterable
from typing import Any
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : int | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Node | None = None # Added in order to delete a node easier
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} ,indent=1 )
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Node | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = root
def __str__( self : Dict ) -> str:
'''simple docstring'''
return str(self.root )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Node ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCAmelCase_ : Optional[int] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCAmelCase__ ): # If it is the right children
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : Any = new_children
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Node ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase_ ( self : List[str] ) -> bool:
'''simple docstring'''
return self.root is None
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Union[str, Any] ) -> None:
'''simple docstring'''
lowerCAmelCase_ : str = Node(lowerCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase_ : Optional[int] = new_node # set its root
else: # Tree is not empty
lowerCAmelCase_ : List[Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase_ : Dict = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase_ : List[str] = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase_ : Dict = new_node
break
else:
lowerCAmelCase_ : str = parent_node.right
lowerCAmelCase_ : Optional[int] = parent_node
def UpperCAmelCase_ ( self : int ,*lowerCAmelCase__ : Tuple ) -> None:
'''simple docstring'''
for value in values:
self.__insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Optional[int] ) -> Node | None:
'''simple docstring'''
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
lowerCAmelCase_ : Dict = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase_ : Union[str, Any] = node.left if value < node.value else node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase_ : Union[str, Any] = node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
lowerCAmelCase_ : Dict = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase_ : Dict = self.root
while node.left is not None:
lowerCAmelCase_ : Union[str, Any] = node.left
return node
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : int ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.search(lowerCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCAmelCase__ ,lowerCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCAmelCase__ ,node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCAmelCase__ ,node.left )
else:
lowerCAmelCase_ : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase_ : Any = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Node | None ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Dict=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : list ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if node:
self.inorder(lowerCAmelCase__ ,node.left )
arr.append(node.value )
self.inorder(lowerCAmelCase__ ,node.right )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Node ) -> int:
'''simple docstring'''
lowerCAmelCase_ : list[int] = []
self.inorder(lowerCAmelCase__ ,lowerCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
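    # Illustrative note: the in-order traversal above yields values in sorted
    # order, so for a tree built from (8, 3, 6, 1, 10, 14, 13, 4, 7) a query
    # with k = 3 returns 4.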
def postorder ( curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def UpperCamelCase ( ):
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: " , t.get_max().value)  # type: ignore
        print("Min Value: " , t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowercase = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 |
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : str = "" ,lowerCAmelCase__ : bool = False ) -> None:
'''simple docstring'''
lowerCAmelCase_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase_ : int = is_leaf
lowerCAmelCase_ : Optional[Any] = prefix
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ) -> tuple[str, str, str]:
'''simple docstring'''
lowerCAmelCase_ : Any = 0
for q, w in zip(self.prefix ,lowerCAmelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : list[str] ) -> None:
'''simple docstring'''
for word in words:
self.insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
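        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as leaf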
if self.prefix == word:
lowerCAmelCase_ : Optional[Any] = True
        # Case 2: The node has no edge whose prefix matches the start of the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
elif word[0] not in self.nodes:
lowerCAmelCase_ : List[Any] = RadixNode(prefix=lowerCAmelCase__ ,is_leaf=lowerCAmelCase__ )
else:
lowerCAmelCase_ : Tuple = self.nodes[word[0]]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = incoming_node.match(
lowerCAmelCase__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
            # Case 4: The node prefix and the word only partially match
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase_ : Optional[int] = remaining_prefix
lowerCAmelCase_ : Optional[int] = self.nodes[matching_string[0]]
lowerCAmelCase_ : List[Any] = RadixNode(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = aux_node
if remaining_word == "":
lowerCAmelCase_ : List[str] = True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : int = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase_ : str = list(self.nodes.values() )[0]
lowerCAmelCase_ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase_ : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase_ : Optional[Any] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase_ : Tuple = list(incoming_node.nodes.values() )[0]
lowerCAmelCase_ : Union[str, Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase_ : str = merging_node.nodes
return True
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : int = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height ,self.prefix ," (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie ( ):
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
assert not root.find("bandanas")
assert not root.find("apps")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def UpperCamelCase ( ):
assert test_trie()
def main ( ):
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:" , words)
print("Tree:")
root.print_tree()
if __name__ == "__main__":
main()
| 683 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 512,
'''squeezebert/squeezebert-mnli''': 512,
'''squeezebert/squeezebert-mnli-headless''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""A "fast" SqueezeBERT tokenizer backed by HuggingFace's *tokenizers* library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: [CLS] A [SEP] or, for pairs, [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0s for the first sequence (and its special tokens), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
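# Hedged usage sketch (added; not part of the upstream module, which has no
# __main__ block). The checkpoint name matches the URL maps above; the exact
# ids printed depend on the downloaded vocabulary.
if __name__ == "__main__":
    tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
    enc = tok("Hello", "World")
    print(enc["input_ids"], enc["token_type_ids"])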
| 683 |
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor")
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor")
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor")
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
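# Added example (made-up numbers): with n and p known, the mass-action law
# n * p = n_i**2 yields the intrinsic concentration.
if __name__ == "__main__":
    assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == (
        "intrinsic_conc",
        50.0,
    )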
| 683 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = DebertaTokenizer
UpperCamelCase_ = True
UpperCamelCase_ = DebertaTokenizerFast
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ : Any = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
lowerCAmelCase_ : Dict = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Union[str, Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ : List[str] = {"unk_token": "[UNK]"}
lowerCAmelCase_ : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Tuple ,**lowerCAmelCase__ : List[str] ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = "lower newer"
lowerCAmelCase_ : int = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ : int = self.get_tokenizer()
lowerCAmelCase_ : str = "lower newer"
lowerCAmelCase_ : Optional[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase_ : Optional[int] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = tokens + [tokenizer.unk_token]
lowerCAmelCase_ : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Any = tokenizer("Hello" ,"World" )
lowerCAmelCase_ : Any = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] ,lowerCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : Any ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
lowerCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" ,add_special_tokens=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = tokenizer.encode("multi-sequence build" ,add_special_tokens=lowerCAmelCase__ )
lowerCAmelCase_ : str = tokenizer.encode(
"sequence builders" ,add_special_tokens=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = tokenizer.encode(
"sequence builders" ,"multi-sequence build" ,add_special_tokens=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
lowerCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ,lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCAmelCase_ : str = tokenizer_class.from_pretrained("microsoft/deberta-base" )
lowerCAmelCase_ : int = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
lowerCAmelCase_ : Tuple = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = [tokenizer.decode(lowerCAmelCase__ ,skip_special_tokens=lowerCAmelCase__ ) for seq in encoding["input_ids"]]
# fmt: off
lowerCAmelCase_ : Any = {
"input_ids": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCAmelCase_ : Dict = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data ,lowerCAmelCase__ )
for expected, decoded in zip(lowerCAmelCase__ ,lowerCAmelCase__ ):
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
| 683 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
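# Added note: the _LazyModule indirection above defers the torch-backed
# submodule imports until a GIT symbol is actually accessed, so importing the
# package stays cheap when only the configuration classes are needed.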
| 683 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()
results = {}
# fmt: off
_lowercase = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_lowercase = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_lowercase = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_lowercase = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_lowercase = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_lowercase = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_lowercase = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_lowercase = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_lowercase = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_lowercase = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_lowercase = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_lowercase = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_lowercase = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_lowercase = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_lowercase = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f"Started running {mod.modelId}!!!")
if mod.modelId.startswith('''CompVis'''):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f"{mod.modelId} has passed successfully!!!")
| 683 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
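# Added usage note (hedged): assuming this script is saved as run_benchmark_tf.py,
# a typical invocation looks like the line below; see TensorFlowBenchmarkArguments
# for the full flag set.
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128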
| 683 | 1 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # Columns covered by exactly one prime implicant mark that implicant as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
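# Added sanity check (non-interactive; main() above reads from stdin). Integer
# minterms keep the generated binary strings well-formed.
if __name__ == "__main__":
    assert decimal_to_binary(3, [1, 5, 7]) == ["001", "101", "111"]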
| 683 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a (possibly negative) whole number to its hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
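# Added sanity checks exercising the repaired converter above.
if __name__ == "__main__":
    assert decimal_to_hexadecimal(5) == "0x5"
    assert decimal_to_hexadecimal(16) == "0x10"
    assert decimal_to_hexadecimal(-256) == "-0x100"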
| 683 | 1 |
def odd_even_sort(input_list: list) -> list:
    """Brick sort: alternate passes over even and odd index pairs until sorted."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
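# Added sanity check for the repaired brick sort above.
assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]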
if __name__ == "__main__":
print('''Enter list to be sorted''')
input_list = [int(x) for x in input().split()]
# inputing elements of the list in one line
sorted_list = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
| 683 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase = ['''text''', '''image''', '''audio''']
def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class __snake_case :
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"inputs" ) )
self.assertTrue(hasattr(self.tool ,"outputs" ) )
lowerCAmelCase_ : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input ,lowerCAmelCase__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCAmelCase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCAmelCase_ : Optional[int] = [outputs]
self.assertListEqual(output_types(lowerCAmelCase__ ) ,self.tool.outputs )
def UpperCAmelCase_ ( self : int ) -> Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"description" ) )
self.assertTrue(hasattr(self.tool ,"default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : str = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase__ ,self.tool.outputs ):
lowerCAmelCase_ : Tuple = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Tuple = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = []
for _input, input_type in zip(lowerCAmelCase__ ,self.tool.inputs ):
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : int = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
| 683 | 1 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
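# Added round-trip sketch (the key "CIPHER" is made up): enciphering and then
# deciphering with the same map is the identity on alphabetic text.
def _demo_round_trip() -> None:
    cipher_map = create_cipher_map("CIPHER")
    assert decipher(encipher("HELLO WORLD", cipher_map), cipher_map) == "HELLO WORLD"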
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 |
import pytest
_lowercase = '''__dummy_dataset1__'''
_lowercase = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 683 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Any ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("DownBlock2D", "AttnDownBlock2D") ,up_block_types=("AttnUpBlock2D", "UpBlock2D") ,)
return model
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : int = self.dummy_uncond_unet
lowerCAmelCase_ : Any = PNDMScheduler()
lowerCAmelCase_ : List[str] = PNDMPipeline(unet=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ )
pndm.to(lowerCAmelCase__ )
pndm.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = torch.manual_seed(0 )
lowerCAmelCase_ : List[str] = pndm(generator=lowerCAmelCase__ ,num_inference_steps=20 ,output_type="numpy" ).images
lowerCAmelCase_ : int = torch.manual_seed(0 )
lowerCAmelCase_ : int = pndm(generator=lowerCAmelCase__ ,num_inference_steps=20 ,output_type="numpy" ,return_dict=lowerCAmelCase__ )[0]
lowerCAmelCase_ : str = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ : Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = "google/ddpm-cifar10-32"
lowerCAmelCase_ : Optional[int] = UNetaDModel.from_pretrained(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = PNDMScheduler()
lowerCAmelCase_ : Dict = PNDMPipeline(unet=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ )
pndm.to(lowerCAmelCase__ )
pndm.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase_ : Dict = pndm(generator=lowerCAmelCase__ ,output_type="numpy" ).images
lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ : Union[str, Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 683 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = CodeGenTokenizer
UpperCamelCase_ = CodeGenTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = {'add_prefix_space': True}
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : str ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowerCAmelCase_ : int = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ : List[Any] = {"unk_token": "<unk>"}
lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : str ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = "lower newer"
lowerCAmelCase_ : Tuple = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
lowerCAmelCase_ : Dict = "lower newer"
lowerCAmelCase_ : Dict = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokens + [tokenizer.unk_token]
lowerCAmelCase_ : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = "lower newer"
# Testing tokenization
lowerCAmelCase_ : Tuple = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids without special tokens
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids with special tokens
lowerCAmelCase_ : int = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing the unknown token
lowerCAmelCase_ : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,*lowerCAmelCase__ : List[str] ,**lowerCAmelCase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Any=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
# Simple input
lowerCAmelCase_ : int = "This is a simple input"
lowerCAmelCase_ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : str = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token="<pad>" )
# Simple input
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowerCAmelCase_ : Any = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCAmelCase_ : Dict = tokenizer.pad_token_id
lowerCAmelCase_ : Union[str, Any] = tokenizer(lowerCAmelCase__ ,padding="max_length" ,max_length=30 ,return_tensors="np" )
lowerCAmelCase_ : Tuple = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
lowerCAmelCase_ : Any = tokenizer(*lowerCAmelCase__ ,padding="max_length" ,max_length=60 ,return_tensors="np" )
lowerCAmelCase_ : Optional[int] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = "$$$"
lowerCAmelCase_ : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowerCAmelCase__ ,add_bos_token=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : int = tokenizer.bos_token_id
lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ )
self.assertEqual(out_s.input_ids[0] ,lowerCAmelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase_ : List[str] = tokenizer.decode(out_s.input_ids )
lowerCAmelCase_ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowerCAmelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowerCAmelCase_ : str = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
lowerCAmelCase_ : int = "\nif len_a > len_b: result = a\nelse: result = b"
lowerCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.decode(lowerCAmelCase__ ,truncate_before_pattern=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
| 683 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = KandinskyVaaControlnetImgaImgPipeline
UpperCamelCase_ = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
UpperCamelCase_ = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
UpperCamelCase_ = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
UpperCamelCase_ = False
@property
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
return 32
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return 1_00
@property
def UpperCAmelCase_ ( self : Tuple ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : Any = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCAmelCase_ : List[Any] = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self : Tuple ) -> str:
'''simple docstring'''
lowerCAmelCase_ : int = self.dummy_unet
lowerCAmelCase_ : Optional[Any] = self.dummy_movq
lowerCAmelCase_ : List[str] = {
"num_train_timesteps": 10_00,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowerCAmelCase_ : Optional[Any] = DDIMScheduler(**lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Any=0 ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
lowerCAmelCase__ )
# create init_image
lowerCAmelCase_ : List[str] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowerCAmelCase_ : Any = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((2_56, 2_56) )
# create hint
lowerCAmelCase_ : Dict = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith("mps" ):
lowerCAmelCase_ : str = torch.manual_seed(lowerCAmelCase__ )
else:
lowerCAmelCase_ : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = "cpu"
lowerCAmelCase_ : Optional[int] = self.get_dummy_components()
lowerCAmelCase_ : List[Any] = self.pipeline_class(**lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
lowerCAmelCase_ : int = output.images
lowerCAmelCase_ : Optional[Any] = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) ,return_dict=lowerCAmelCase__ ,)[0]
lowerCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Optional[int] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
lowerCAmelCase_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCAmelCase_ : Union[str, Any] = init_image.resize((5_12, 5_12) )
lowerCAmelCase_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
lowerCAmelCase_ : Optional[int] = torch.from_numpy(np.array(lowerCAmelCase__ ) ).float() / 255.0
lowerCAmelCase_ : Any = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
lowerCAmelCase_ : int = "A robot, 4k photo"
lowerCAmelCase_ : str = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" ,torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" ,torch_dtype=torch.floataa )
lowerCAmelCase_ : List[str] = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = pipe_prior(
lowerCAmelCase__ ,image=lowerCAmelCase__ ,strength=0.85 ,generator=lowerCAmelCase__ ,negative_prompt="" ,).to_tuple()
lowerCAmelCase_ : List[Any] = pipeline(
image=lowerCAmelCase__ ,image_embeds=lowerCAmelCase__ ,negative_image_embeds=lowerCAmelCase__ ,hint=lowerCAmelCase__ ,generator=lowerCAmelCase__ ,num_inference_steps=1_00 ,height=5_12 ,width=5_12 ,strength=0.5 ,output_type="np" ,)
lowerCAmelCase_ : Tuple = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(lowerCAmelCase__ ,lowerCAmelCase__ )
| 683 |
from __future__ import annotations
from random import random
class Node:
    """Treap node: stores a value plus a random heap priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split into (values < value, values >= value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every value in `left` must be <= values in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def UpperCamelCase ( snake_case__):
if not root: # None
return
else:
inorder(root.left)
print(root.value , end=",")
inorder(root.right)
def UpperCamelCase ( snake_case__ , snake_case__):
for arg in args.split():
if arg[0] == "+":
lowerCAmelCase_ : List[str] = insert(snake_case__ , int(arg[1:]))
elif arg[0] == "-":
lowerCAmelCase_ : Optional[int] = erase(snake_case__ , int(arg[1:]))
else:
print("Unknown command")
return root
def UpperCamelCase ( ):
lowerCAmelCase_ : str = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. ")
lowerCAmelCase_ : str = input()
while args != "q":
lowerCAmelCase_ : int = interact_treap(snake_case__ , snake_case__)
print(snake_case__)
lowerCAmelCase_ : str = input()
print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_lowercase = TypeVar('''T''')
class __snake_case ( Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowerCAmelCase__ : T ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = data
lowerCAmelCase_ : Node[T] | None = None
def __str__( self : Optional[int] ) -> str:
'''simple docstring'''
return f'''{self.data}'''
class __snake_case ( Generic[T] ):
"""simple docstring"""
def __init__( self : str ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Node[T] | None = None
def __iter__( self : str ) -> Iterator[T]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.top
while node:
yield node.data
lowerCAmelCase_ : str = node.next
def __str__( self : List[str] ) -> str:
'''simple docstring'''
return "->".join([str(lowerCAmelCase__ ) for item in self] )
def __len__( self : Dict ) -> int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def UpperCAmelCase_ ( self : List[Any] ) -> bool:
'''simple docstring'''
return self.top is None
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : T ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = Node(lowerCAmelCase__ )
if not self.is_empty():
lowerCAmelCase_ : Dict = self.top
lowerCAmelCase_ : Optional[int] = node
def UpperCAmelCase_ ( self : List[Any] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top ,lowerCAmelCase__ )
lowerCAmelCase_ : int = self.top
lowerCAmelCase_ : Union[str, Any] = self.top.next
return pop_node.data
def UpperCAmelCase_ ( self : Optional[int] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
lowerCAmelCase_ : str = None
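# Illustrative LIFO behaviour (comments only, since every method above shares
# the anonymized name UpperCAmelCase_): after pushing 1, 2 and 3, the stack
# renders as "3->2->1", len(stack) == 3, and successive pops yield 3, 2, 1.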
if __name__ == "__main__":
from doctest import testmod
testmod()
| 683 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
_lowercase = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowercase = {f"funnel-transformer/{name}": 512 for name in _model_names}
_lowercase = {f"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = 2
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : Optional[int]=None ,lowerCAmelCase__ : Optional[Any]=True ,lowerCAmelCase__ : List[str]="<unk>" ,lowerCAmelCase__ : int="<sep>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : List[str]="<cls>" ,lowerCAmelCase__ : Optional[int]="<mask>" ,lowerCAmelCase__ : Union[str, Any]="<s>" ,lowerCAmelCase__ : List[str]="</s>" ,lowerCAmelCase__ : Optional[int]=True ,lowerCAmelCase__ : Tuple=True ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : List[Any]="##" ,**lowerCAmelCase__ : int ,) -> List[Any]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ ,tokenizer_file=lowerCAmelCase__ ,do_lower_case=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,clean_text=lowerCAmelCase__ ,tokenize_chinese_chars=lowerCAmelCase__ ,strip_accents=lowerCAmelCase__ ,wordpieces_prefix=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" ,lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" ,lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" ,lowerCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Optional[int] = getattr(lowerCAmelCase__ ,normalizer_state.pop("type" ) )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : List[str] = strip_accents
lowerCAmelCase_ : Any = tokenize_chinese_chars
lowerCAmelCase_ : List[Any] = normalizer_class(**lowerCAmelCase__ )
lowerCAmelCase_ : int = do_lower_case
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ,lowerCAmelCase__ : str=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
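    # Example (illustrative): with a single two-token sequence the method
    # returns [2, 0, 0, 0] -- one cls_token_type_id (the class attribute 2
    # above) for [CLS], then zeros covering the tokens and the trailing [SEP].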
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowerCAmelCase_ : str = self._tokenizer.model.save(lowerCAmelCase__ ,name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 683 | 1 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_lowercase = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowercase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCamelCase ( snake_case__):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested")
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested")
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested")
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment")
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate")
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule")
def UpperCamelCase ( snake_case__):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__)
def UpperCamelCase ( snake_case__):
from transformers.testing_utils import pytest_terminal_summary_main
lowerCAmelCase_ : int = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
lowerCAmelCase_ : List[Any] = 0
# Doctest custom flag to ignore output.
_lowercase = doctest.register_optionflag('''IGNORE_RESULT''')
_lowercase = doctest.OutputChecker
class __snake_case ( snake_case__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Any:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
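# Illustrative doctest usage of the flag registered above (an assumption about
# intent, mirroring standard doctest option-flag syntax):
#     >>> noisy_call()  # doctest: +IGNORE_RESULT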
_lowercase = CustomOutputChecker
_lowercase = HfDoctestModule
_lowercase = HfDocTestParser
| 683 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = list(snake_case__)
lowerCAmelCase_ : Tuple = list(snake_case__)
lowerCAmelCase_ : List[str] = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count += 1
lowerCAmelCase_ : Dict = "_"
if count > 1:
return False
else:
return "".join(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = []
while True:
lowerCAmelCase_ : Tuple = ["$"] * len(snake_case__)
lowerCAmelCase_ : Tuple = []
for i in range(len(snake_case__)):
for j in range(i + 1 , len(snake_case__)):
lowerCAmelCase_ : Optional[int] = compare_string(binary[i] , binary[j])
if k is False:
lowerCAmelCase_ : str = "*"
lowerCAmelCase_ : Tuple = "*"
temp.append("X")
for i in range(len(snake_case__)):
if checka[i] == "$":
pi.append(binary[i])
if len(snake_case__) == 0:
return pi
lowerCAmelCase_ : List[Any] = list(set(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = []
for minterm in minterms:
lowerCAmelCase_ : Dict = ""
for _ in range(snake_case__):
lowerCAmelCase_ : Dict = str(minterm % 2) + string
minterm //= 2
temp.append(snake_case__)
return temp
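# Example (illustrative): with 3 variables and minterm 1.5 the loop builds
# "0.00.01.5" -- each "bit" is the string form of minterm % 2, so the float
# minterms used below still yield fixed-width, comparable strings.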
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = list(snake_case__)
lowerCAmelCase_ : Dict = list(snake_case__)
lowerCAmelCase_ : Dict = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = [0] * len(snake_case__)
for i in range(len(chart[0])):
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : int = -1
for j in range(len(snake_case__)):
if chart[j][i] == 1:
count += 1
lowerCAmelCase_ : Optional[int] = j
if count == 1:
lowerCAmelCase_ : Union[str, Any] = 1
for i in range(len(snake_case__)):
if select[i] == 1:
for j in range(len(chart[0])):
if chart[i][j] == 1:
for k in range(len(snake_case__)):
lowerCAmelCase_ : Tuple = 0
temp.append(prime_implicants[i])
while True:
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Dict = -1
lowerCAmelCase_ : Tuple = 0
for i in range(len(snake_case__)):
lowerCAmelCase_ : Dict = chart[i].count(1)
if count_n > max_n:
lowerCAmelCase_ : Optional[int] = count_n
lowerCAmelCase_ : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem])
for i in range(len(chart[0])):
if chart[rem][i] == 1:
for j in range(len(snake_case__)):
lowerCAmelCase_ : Any = 0
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : str = [[0 for x in range(len(snake_case__))] for x in range(len(snake_case__))]
for i in range(len(snake_case__)):
lowerCAmelCase_ : Optional[Any] = prime_implicants[i].count("_")
for j in range(len(snake_case__)):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__):
lowerCAmelCase_ : Dict = 1
return chart
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = int(input("Enter the no. of variables\n"))
lowerCAmelCase_ : Tuple = [
        float(x)
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
]
lowerCAmelCase_ : Any = decimal_to_binary(snake_case__ , snake_case__)
lowerCAmelCase_ : Dict = check(snake_case__)
print("Prime Implicants are:")
print(snake_case__)
lowerCAmelCase_ : int = prime_implicant_chart(snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = selection(snake_case__ , snake_case__)
print("Essential Prime Implicants are:")
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 1 |
import collections
import os
import re
from pathlib import Path
_lowercase = '''src/transformers'''
# Matches is_xxx_available()
_lowercase = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_lowercase = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowercase = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_lowercase = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_lowercase = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowercase = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_lowercase = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowercase = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_lowercase = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_lowercase = re.compile(r'''^\s*try:''')
# Catches a line with else:
_lowercase = re.compile(r'''^\s*else:''')
def UpperCamelCase ( snake_case__):
if _re_test_backend.search(snake_case__) is None:
return None
lowerCAmelCase_ : Optional[Any] = [b[0] for b in _re_backend.findall(snake_case__)]
backends.sort()
return "_and_".join(snake_case__)
def UpperCamelCase ( snake_case__):
with open(snake_case__ , "r" , encoding="utf-8" , newline="\n") as f:
lowerCAmelCase_ : Any = f.readlines()
lowerCAmelCase_ : int = 0
while line_index < len(snake_case__) and not lines[line_index].startswith("_import_structure = {"):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case__):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase_ : Union[str, Any] = []
while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
lowerCAmelCase_ : Union[str, Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case__):
lowerCAmelCase_ : List[Any] = _re_one_line_import_struct.search(snake_case__).groups()[0]
lowerCAmelCase_ : List[str] = re.findall(R"\[([^\]]+)\]" , snake_case__)
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", ")])
line_index += 1
continue
lowerCAmelCase_ : int = _re_import_struct_key_value.search(snake_case__)
if single_line_import_search is not None:
lowerCAmelCase_ : Any = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(snake_case__) > 0]
objects.extend(snake_case__)
elif line.startswith(" " * 8 + "\""):
objects.append(line[9:-3])
line_index += 1
lowerCAmelCase_ : str = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING"):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase_ : List[Any] = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
lowerCAmelCase_ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
lowerCAmelCase_ : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
lowerCAmelCase_ : Tuple = lines[line_index]
if _re_import_struct_add_one.search(snake_case__) is not None:
objects.append(_re_import_struct_add_one.search(snake_case__).groups()[0])
elif _re_import_struct_add_many.search(snake_case__) is not None:
lowerCAmelCase_ : Dict = _re_import_struct_add_many.search(snake_case__).groups()[0].split(", ")
lowerCAmelCase_ : Optional[int] = [obj[1:-1] for obj in imports if len(snake_case__) > 0]
objects.extend(snake_case__)
elif _re_between_brackets.search(snake_case__) is not None:
lowerCAmelCase_ : Any = _re_between_brackets.search(snake_case__).groups()[0].split(", ")
lowerCAmelCase_ : Optional[int] = [obj[1:-1] for obj in imports if len(snake_case__) > 0]
objects.extend(snake_case__)
elif _re_quote_object.search(snake_case__) is not None:
objects.append(_re_quote_object.search(snake_case__).groups()[0])
elif line.startswith(" " * 8 + "\""):
objects.append(line[9:-3])
elif line.startswith(" " * 12 + "\""):
objects.append(line[13:-3])
line_index += 1
lowerCAmelCase_ : int = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase_ : Optional[Any] = []
while (
line_index < len(snake_case__)
and find_backend(lines[line_index]) is None
and not lines[line_index].startswith("else")
):
lowerCAmelCase_ : Optional[Any] = lines[line_index]
lowerCAmelCase_ : Tuple = _re_import.search(snake_case__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", "))
elif line.startswith(" " * 8):
objects.append(line[8:-2])
line_index += 1
lowerCAmelCase_ : Optional[Any] = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case__):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase_ : List[Any] = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
lowerCAmelCase_ : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
lowerCAmelCase_ : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
lowerCAmelCase_ : Optional[Any] = lines[line_index]
lowerCAmelCase_ : int = _re_import.search(snake_case__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", "))
elif line.startswith(" " * 12):
objects.append(line[12:-2])
line_index += 1
lowerCAmelCase_ : int = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCamelCase ( snake_case__ , snake_case__):
def find_duplicates(snake_case__):
return [k for k, v in collections.Counter(snake_case__).items() if v > 1]
if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase_ : Tuple = []
for key in import_dict_objects.keys():
lowerCAmelCase_ : Dict = find_duplicates(import_dict_objects[key])
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''')
lowerCAmelCase_ : Union[str, Any] = find_duplicates(type_hint_objects[key])
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
lowerCAmelCase_ : Tuple = "base imports" if key == "none" else F'''{key} backend'''
errors.append(F'''Differences for {name}:''')
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''')
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''')
return errors
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[int] = []
for root, _, files in os.walk(snake_case__):
if "__init__.py" in files:
lowerCAmelCase_ : int = os.path.join(snake_case__ , "__init__.py")
lowerCAmelCase_ : str = parse_init(snake_case__)
if objects is not None:
lowerCAmelCase_ : Tuple = analyze_results(*snake_case__)
if len(snake_case__) > 0:
lowerCAmelCase_ : Dict = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("\n".join(snake_case__))
if len(snake_case__) > 0:
raise ValueError("\n\n".join(snake_case__))
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = []
for path, directories, files in os.walk(snake_case__):
for folder in directories:
# Ignore private modules
if folder.startswith("_"):
directories.remove(snake_case__)
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(snake_case__) / folder).glob("*.py"))) == 0:
continue
lowerCAmelCase_ : Union[str, Any] = str((Path(snake_case__) / folder).relative_to(snake_case__))
lowerCAmelCase_ : List[Any] = short_path.replace(os.path.sep , ".")
submodules.append(snake_case__)
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase_ : Dict = str((Path(snake_case__) / fname).relative_to(snake_case__))
lowerCAmelCase_ : Any = short_path.replace(".py" , "").replace(os.path.sep , ".")
if len(submodule.split(".")) == 1:
submodules.append(snake_case__)
return submodules
_lowercase = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def UpperCamelCase ( ):
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
lowerCAmelCase_ : int = direct_transformers_import(snake_case__)
lowerCAmelCase_ : Optional[int] = set(transformers._import_structure.keys())
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-)add them.
with open(os.path.join(snake_case__ , "__init__.py") , "r") as f:
lowerCAmelCase_ : Any = f.read()
import_structure_keys.update(set(re.findall(R"import_structure\[\"([^\"]*)\"\]" , snake_case__)))
lowerCAmelCase_ : Dict = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(snake_case__) > 0:
lowerCAmelCase_ : str = "\n".join(F'''- {module}''' for module in module_not_registered)
raise ValueError(
"The following submodules are not properly registed in the main init of Transformers:\n"
F'''{list_of_modules}\n'''
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 683 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
lowerCAmelCase_ : List[Any] = bnb_quantization_config.load_in_abit
lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed.")
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
if isinstance(snake_case__ , snake_case__) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
lowerCAmelCase_ : Optional[int] = load_in_abit
lowerCAmelCase_ : List[str] = load_in_abit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(snake_case__):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(snake_case__ , snake_case__):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
if isinstance(snake_case__ , snake_case__):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ):
lowerCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase_ : Optional[int] = []
current_key_name.append(snake_case__)
if isinstance(snake_case__ , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase_ : Optional[int] = ".".join(snake_case__)
lowerCAmelCase_ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase_ : List[Any] = False
break
if proceed:
                # Load bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Tuple = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False")
lowerCAmelCase_ : List[str] = module.weight.data
if module.bias is not None:
lowerCAmelCase_ : Any = module.bias.data
bnb_module.requires_grad_(snake_case__)
setattr(snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = True
if len(list(module.children())) > 0:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
def UpperCamelCase ( snake_case__):
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase_ : List[Any] = deepcopy(snake_case__) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase_ : Dict = find_tied_parameters(snake_case__)
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
lowerCAmelCase_ : Optional[Any] = sum(snake_case__ , [])
lowerCAmelCase_ : List[Any] = len(snake_case__) > 0
# Check if it is a base model
lowerCAmelCase_ : List[str] = False
if hasattr(snake_case__ , "base_model_prefix"):
lowerCAmelCase_ : Tuple = not hasattr(snake_case__ , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase_ : Union[str, Any] = list(model.named_children())
lowerCAmelCase_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase_ : Any = set(snake_case__) - set(snake_case__)
lowerCAmelCase_ : Tuple = list(set(snake_case__)) + list(snake_case__)
# remove ".weight" from the keys
lowerCAmelCase_ : List[str] = [".weight", ".bias"]
lowerCAmelCase_ : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase_ : str = name.replace(snake_case__ , "")
filtered_module_names.append(snake_case__)
return filtered_module_names
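# Example (illustrative): for a causal-LM whose lm_head is tied to the input
# embeddings, this typically returns ["lm_head"], keeping that module out of
# the 8-bit/4-bit conversion.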
def UpperCamelCase ( snake_case__):
for m in model.modules():
if isinstance(snake_case__ , bnb.nn.Linearabit):
return True
return False
def UpperCamelCase ( snake_case__):
return next(parameter.parameters()).device
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(snake_case__ , snake_case__ , 0 , dtype=snake_case__ , value=snake_case__)
lowerCAmelCase_ : str = param_name
lowerCAmelCase_ : Tuple = model
if "." in tensor_name:
lowerCAmelCase_ : Dict = tensor_name.split(".")
for split in splits[:-1]:
lowerCAmelCase_ : Any = getattr(snake_case__ , snake_case__)
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''')
lowerCAmelCase_ : Union[str, Any] = new_module
lowerCAmelCase_ : Any = splits[-1]
# offload weights
lowerCAmelCase_ : List[Any] = False
offload_weight(module._parameters[tensor_name] , snake_case__ , snake_case__ , index=snake_case__)
if hasattr(module._parameters[tensor_name] , "SCB"):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__ , )
else:
offload_weight(snake_case__ , snake_case__ , snake_case__ , index=snake_case__)
offload_weight(snake_case__ , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__)
set_module_tensor_to_device(snake_case__ , snake_case__ , "meta" , dtype=snake_case__ , value=torch.empty(*param.size()))
| 683 | 1 |
import math
def UpperCamelCase ( snake_case__ , snake_case__ = 0 , snake_case__ = 0):
lowerCAmelCase_ : List[str] = end or len(snake_case__)
for i in range(snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = i
lowerCAmelCase_ : Optional[Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCAmelCase_ : Dict = array[temp_index - 1]
temp_index -= 1
lowerCAmelCase_ : Optional[int] = temp_index_value
return array
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__): # Max Heap
lowerCAmelCase_ : Optional[Any] = index
lowerCAmelCase_ : int = 2 * index + 1 # Left Node
lowerCAmelCase_ : Optional[Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCAmelCase_ : Dict = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCAmelCase_ : Union[str, Any] = right_index
if largest != index:
lowerCAmelCase_ , lowerCAmelCase_ : Any = array[largest], array[index]
heapify(snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : int = len(snake_case__)
for i in range(n // 2 , -1 , -1):
heapify(snake_case__ , snake_case__ , snake_case__)
for i in range(n - 1 , 0 , -1):
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = array[0], array[i]
heapify(snake_case__ , 0 , snake_case__)
return array
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
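# Example (illustrative): for array [1, 2, 3] with indices 0, 1 and 2 the
# function returns the middle value 2, which intro sort uses as its pivot
# candidate.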
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Any = low
lowerCAmelCase_ : List[str] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = array[j], array[i]
i += 1
def UpperCamelCase ( snake_case__):
if len(snake_case__) == 0:
return array
lowerCAmelCase_ : List[Any] = 2 * math.ceil(math.loga(len(snake_case__)))
lowerCAmelCase_ : Any = 16
return intro_sort(snake_case__ , 0 , len(snake_case__) , snake_case__ , snake_case__)
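# Example (illustrative): sorting [4, 2, 6, 8, 1] yields [1, 2, 4, 6, 8];
# max_depth starts at 2 * ceil(log2(n)) before the sort falls back to heap
# sort, and runs shorter than 16 elements finish with insertion sort.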
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(snake_case__)
max_depth -= 1
lowerCAmelCase_ : int = median_of_a(snake_case__ , snake_case__ , start + ((end - start) // 2) + 1 , end - 1)
lowerCAmelCase_ : Union[str, Any] = partition(snake_case__ , snake_case__ , snake_case__ , snake_case__)
intro_sort(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : int = p
return insertion_sort(snake_case__ , snake_case__ , snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = input('''Enter numbers separated by a comma : ''').strip()
_lowercase = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 683 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = ['input_features', 'is_longer']
def __init__( self : Optional[int] ,lowerCAmelCase__ : List[Any]=64 ,lowerCAmelCase__ : Any=4_80_00 ,lowerCAmelCase__ : Optional[Any]=4_80 ,lowerCAmelCase__ : List[str]=10 ,lowerCAmelCase__ : List[Any]=10_24 ,lowerCAmelCase__ : Union[str, Any]=0.0 ,lowerCAmelCase__ : Tuple=False ,lowerCAmelCase__ : float = 0 ,lowerCAmelCase__ : float = 1_40_00 ,lowerCAmelCase__ : int = None ,lowerCAmelCase__ : str = "fusion" ,lowerCAmelCase__ : str = "repeatpad" ,**lowerCAmelCase__ : Union[str, Any] ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Optional[Any] = top_db
lowerCAmelCase_ : str = truncation
lowerCAmelCase_ : Tuple = padding
lowerCAmelCase_ : str = fft_window_size
lowerCAmelCase_ : Dict = (fft_window_size >> 1) + 1
lowerCAmelCase_ : Dict = hop_length
lowerCAmelCase_ : Any = max_length_s
lowerCAmelCase_ : int = max_length_s * sampling_rate
lowerCAmelCase_ : Optional[int] = sampling_rate
lowerCAmelCase_ : int = frequency_min
lowerCAmelCase_ : Optional[Any] = frequency_max
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm=lowerCAmelCase__ ,mel_scale="htk" ,)
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm="slaney" ,mel_scale="slaney" ,)
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = spectrogram(
lowerCAmelCase__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCAmelCase__ ,log_mel="dB" ,)
return log_mel_spectrogram.T
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
# randomly choose index for each part
lowerCAmelCase_ : str = np.random.choice(ranges[0] )
lowerCAmelCase_ : Optional[Any] = np.random.choice(ranges[1] )
lowerCAmelCase_ : Any = np.random.choice(ranges[2] )
lowerCAmelCase_ : str = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase_ : List[str] = torch.tensor(mel[None, None, :] )
lowerCAmelCase_ : List[Any] = torch.nn.functional.interpolate(
lowerCAmelCase__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy()
lowerCAmelCase_ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
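    # Illustrative shape note (inferred from the code above): mel_fusion stacks
    # the bilinearly shrunk global view with three random chunks, giving an
    # array of shape (4, chunk_frames, 64).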
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase_ : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase_ : str = len(lowerCAmelCase__ ) - max_length
lowerCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 )
lowerCAmelCase_ : Dict = waveform[idx : idx + max_length]
lowerCAmelCase_ : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : str = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase_ : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase_ : Dict = np.stack([mel, mel, mel, mel] ,axis=0 )
lowerCAmelCase_ : int = False
else:
lowerCAmelCase_ : str = self._random_mel_fusion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase_ : Dict = False
        # Only use repeat as a new possible value for padding. You repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase_ : List[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : int = np.stack(np.tile(lowerCAmelCase__ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase_ : Optional[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = np.stack(np.tile(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = np.pad(lowerCAmelCase__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 )
if truncation == "fusion":
lowerCAmelCase_ : int = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
lowerCAmelCase_ : str = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : str = None ,lowerCAmelCase__ : Optional[str] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCAmelCase__ : List[Any] ,) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ : List[str] = truncation if truncation is not None else self.truncation
lowerCAmelCase_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : Dict = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : Dict = is_batched_numpy or (
isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : List[str] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCAmelCase_ : Tuple = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : Any = [np.asarray(lowerCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase_ : Optional[Any] = [
self._get_input_mel(lowerCAmelCase__ ,max_length if max_length else self.nb_max_samples ,lowerCAmelCase__ ,lowerCAmelCase__ )
for waveform in raw_speech
]
lowerCAmelCase_ : str = []
lowerCAmelCase_ : str = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase__ )
is_longer.append(lowerCAmelCase__ )
if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase_ : Any = np.random.randint(0 ,len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Dict = True
if isinstance(input_mel[0] ,lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase_ : List[Any] = [[longer] for longer in is_longer]
lowerCAmelCase_ : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
lowerCAmelCase_ : Dict = BatchFeature(lowerCAmelCase__ )
if return_tensors is not None:
lowerCAmelCase_ : List[str] = input_features.convert_to_tensors(lowerCAmelCase__ )
return input_features
| 683 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = AlbertTokenizer
UpperCamelCase_ = AlbertTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = True
def UpperCAmelCase_ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ : Optional[Any] = AlbertTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = "this is a test"
lowerCAmelCase_ : Any = "this is a test"
return input_text, output_text
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = "<pad>"
lowerCAmelCase_ : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) ,lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<pad>" )
self.assertEqual(vocab_keys[1] ,"<unk>" )
self.assertEqual(vocab_keys[-1] ,"▁eloquent" )
self.assertEqual(len(lowerCAmelCase__ ) ,3_00_00 )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Dict = self.get_tokenizer()
lowerCAmelCase_ : Any = self.get_rust_tokenizer()
lowerCAmelCase_ : Optional[Any] = "I was born in 92000, and this is falsé."
lowerCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : int = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
lowerCAmelCase_ : int = rust_tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
lowerCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : str ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = AlbertTokenizer(lowerCAmelCase__ ,keep_accents=lowerCAmelCase__ )
lowerCAmelCase_ : Any = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ ,["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,[48, 25, 21, 12_89] )
lowerCAmelCase_ : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCAmelCase__ ,["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
lowerCAmelCase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,[31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] )
lowerCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ ,["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] ,)
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Dict = AlbertTokenizer(lowerCAmelCase__ )
lowerCAmelCase_ : str = tokenizer.encode("sequence builders" )
lowerCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" )
lowerCAmelCase_ : List[str] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
lowerCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ,lowerCAmelCase__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCAmelCase_ ( self : int ) -> Any:
        '''simple docstring'''
        # fmt: off
lowerCAmelCase_ : Union[str, Any] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ ,model_name="albert-base-v2" ,revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" ,)
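def _albert_tokenizer_demo():
    # Illustrative sketch: a minimal encode/decode round trip with the
    # tokenizer class under test. Assumes network access to fetch the
    # "albert-base-v2" checkpoint referenced by the integration test above.
    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    ids = tokenizer.encode("sequence builders")
    # encode() wraps the text in [CLS] ... [SEP], as the assertions above check
    assert ids[0] == tokenizer.cls_token_id and ids[-1] == tokenizer.sep_token_id
    return tokenizer.decode(ids, skip_special_tokens=True)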
| 683 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_lowercase = Lock()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__)
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCAmelCase_ : Optional[Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCAmelCase_ : Any = min(snake_case__ , snake_case__)
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__)
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCAmelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCAmelCase_ : Dict = max(snake_case__ , snake_case__)
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : int = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe())
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCAmelCase_ : Tuple = Pipe()
lowerCAmelCase_ : Optional[int] = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ))
lowerCAmelCase_ : int = temp_rs
lowerCAmelCase_ : List[Any] = temp_rr
for i in range(1 , len(snake_case__) - 1):
lowerCAmelCase_ : Dict = Pipe()
lowerCAmelCase_ : List[str] = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ))
lowerCAmelCase_ : Dict = temp_rs
lowerCAmelCase_ : Optional[Any] = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__) - 1,
arr[len(snake_case__) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__) - 1],
) , ))
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__)):
lowerCAmelCase_ : Union[str, Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
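def _odd_even_transposition_single_process(arr):
    # Illustrative reference: the same odd-even transposition sort without
    # processes or pipes, handy for checking the parallel version above.
    # Pure Python, O(n^2) comparisons over n phases.
    arr = list(arr)
    for phase in range(len(arr)):
        start = phase % 2  # even phases compare (0,1),(2,3)...; odd phases (1,2),(3,4)...
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr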
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = list(range(10 , 0 , -1))
print("Initial List")
print(*snake_case__)
lowerCAmelCase_ : Tuple = odd_even_transposition(snake_case__)
print("Sorted List\n")
print(*snake_case__)
if __name__ == "__main__":
main()
| 683 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : str ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : List[Any] = UNetaDModel(
sample_size=(32, 64) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=("AttnDownBlock2D", "DownBlock2D") ,up_block_types=("UpBlock2D", "AttnUpBlock2D") ,)
return model
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : List[str] = UNetaDConditionModel(
sample_size=(64, 32) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") ,up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") ,cross_attention_dim=10 ,)
return model
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : str = AutoencoderKL(
sample_size=(1_28, 64) ,in_channels=1 ,out_channels=1 ,latent_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") ,up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") ,)
lowerCAmelCase_ : Optional[int] = UNetaDModel(
sample_size=(64, 32) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=("AttnDownBlock2D", "DownBlock2D") ,up_block_types=("UpBlock2D", "AttnUpBlock2D") ,)
return vqvae, unet
@slow
def UpperCAmelCase_ ( self : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Dict = Mel(
x_res=self.dummy_unet.config.sample_size[1] ,y_res=self.dummy_unet.config.sample_size[0] ,)
lowerCAmelCase_ : Union[str, Any] = DDPMScheduler()
lowerCAmelCase_ : List[str] = AudioDiffusionPipeline(vqvae=lowerCAmelCase__ ,unet=self.dummy_unet ,mel=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(42 )
lowerCAmelCase_ : str = pipe(generator=lowerCAmelCase__ ,steps=4 )
lowerCAmelCase_ : Optional[int] = output.audios[0]
lowerCAmelCase_ : Optional[int] = output.images[0]
lowerCAmelCase_ : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(42 )
lowerCAmelCase_ : Any = pipe(generator=lowerCAmelCase__ ,steps=4 ,return_dict=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCAmelCase_ : Optional[Any] = np.frombuffer(image.tobytes() ,dtype="uint8" )[:10]
lowerCAmelCase_ : Union[str, Any] = np.frombuffer(image_from_tuple.tobytes() ,dtype="uint8" )[:10]
lowerCAmelCase_ : List[str] = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowerCAmelCase_ : List[Any] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] ,y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] ,)
lowerCAmelCase_ : List[str] = DDIMScheduler()
lowerCAmelCase_ : int = self.dummy_vqvae_and_unet
lowerCAmelCase_ : Optional[int] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] ,unet=dummy_vqvae_and_unet[1] ,mel=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
np.random.seed(0 )
lowerCAmelCase_ : Any = np.random.uniform(-1 ,1 ,((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCAmelCase_ : List[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(42 )
lowerCAmelCase_ : Union[str, Any] = pipe(raw_audio=lowerCAmelCase__ ,generator=lowerCAmelCase__ ,start_step=5 ,steps=10 )
lowerCAmelCase_ : Dict = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCAmelCase_ : str = np.frombuffer(image.tobytes() ,dtype="uint8" )[:10]
lowerCAmelCase_ : Any = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCAmelCase_ : Union[str, Any] = self.dummy_unet_condition
lowerCAmelCase_ : Tuple = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] ,unet=lowerCAmelCase__ ,mel=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ )
lowerCAmelCase_ : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
np.random.seed(0 )
lowerCAmelCase_ : List[Any] = torch.rand((1, 1, 10) )
lowerCAmelCase_ : List[Any] = pipe(generator=lowerCAmelCase__ ,encoding=lowerCAmelCase__ )
lowerCAmelCase_ : str = output.images[0]
lowerCAmelCase_ : Union[str, Any] = np.frombuffer(image.tobytes() ,dtype="uint8" )[:10]
lowerCAmelCase_ : List[Any] = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Dict ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = torch_device
lowerCAmelCase_ : int = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
lowerCAmelCase_ : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(42 )
lowerCAmelCase_ : Any = pipe(generator=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = output.audios[0]
lowerCAmelCase_ : Union[str, Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCAmelCase_ : int = np.frombuffer(image.tobytes() ,dtype="uint8" )[:10]
lowerCAmelCase_ : Tuple = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
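def _audio_diffusion_demo():
    # Illustrative sketch: minimal end-to-end generation with the pipeline
    # exercised by the slow test above. Assumes the
    # "teticio/audio-diffusion-ddim-256" checkpoint is reachable; runs on CPU.
    pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
    generator = torch.Generator(device="cpu").manual_seed(42)
    output = pipe(generator=generator)
    audio, image = output.audios[0], output.images[0]
    return audio.shape, (image.width, image.height)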
| 683 |
from typing import Any
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validation(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# Creates data structures and fill initial step
lowerCAmelCase_ : dict = {}
lowerCAmelCase_ : dict = {}
for state in states_space:
lowerCAmelCase_ : List[Any] = observations_space[0]
lowerCAmelCase_ : int = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCAmelCase_ : Dict = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case__)):
lowerCAmelCase_ : List[Any] = observations_space[o]
lowerCAmelCase_ : Optional[Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCAmelCase_ : List[Any] = ""
lowerCAmelCase_ : Tuple = -1
for k_state in states_space:
lowerCAmelCase_ : int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Optional[Any] = k_state
# Update probabilities and pointers dicts
lowerCAmelCase_ : Union[str, Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCAmelCase_ : Any = arg_max
# The final observation
lowerCAmelCase_ : List[Any] = observations_space[len(snake_case__) - 1]
# argmax for given final observation
lowerCAmelCase_ : List[str] = ""
lowerCAmelCase_ : List[str] = -1
for k_state in states_space:
lowerCAmelCase_ : List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Tuple = k_state
lowerCAmelCase_ : str = arg_max
# Process pointers backwards
lowerCAmelCase_ : int = last_state
lowerCAmelCase_ : int = []
for o in range(len(snake_case__) - 1 , -1 , -1):
result.append(snake_case__)
lowerCAmelCase_ : Optional[Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validate_not_empty(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
_validate_lists(snake_case__ , snake_case__)
_validate_dicts(
snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]):
raise ValueError("There's an empty parameter")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_list(snake_case__ , "observations_space")
_validate_list(snake_case__ , "states_space")
def UpperCamelCase ( snake_case__ , snake_case__):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list'''
raise ValueError(snake_case__)
else:
for x in _object:
if not isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , ):
_validate_dict(snake_case__ , "initial_probabilities" , snake_case__)
_validate_nested_dict(snake_case__ , "transition_probabilities")
_validate_nested_dict(snake_case__ , "emission_probabilities")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_dict(_object , snake_case__ , snake_case__)
for x in _object.values():
_validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : List[str] = F'''{var_name} must be a dict'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object):
lowerCAmelCase_ : Dict = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object.values()):
lowerCAmelCase_ : Union[str, Any] = "nested dictionary " if nested else ""
lowerCAmelCase_ : Any = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case__)
if __name__ == "__main__":
from doctest import testmod
testmod()
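def _viterbi_toy_inputs():
    # Illustrative sketch: the argument shapes the Viterbi routine above
    # expects, using the classic two-state health example. The probabilities
    # are the textbook values, not taken from this module.
    observations_space = ["normal", "cold", "dizzy"]
    states_space = ["Healthy", "Fever"]
    initial_probabilities = {"Healthy": 0.6, "Fever": 0.4}
    transition_probabilities = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emission_probabilities = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # The most likely state sequence for these inputs is
    # ["Healthy", "Healthy", "Fever"].
    return (
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )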
| 683 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCamelCase ( snake_case__ , snake_case__=False):
lowerCAmelCase_ : Union[str, Any] = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias'''))
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
])
return rename_keys
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCAmelCase_ : str = ""
else:
lowerCAmelCase_ : Optional[Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ : Optional[Any] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''')
lowerCAmelCase_ : Optional[Any] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase_ : List[Any] = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase_ : Dict = in_proj_bias[-config.hidden_size :]
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Dict = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Any = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : str = dct.pop(SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : Union[str, Any] = val
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Any = ViTMSNConfig()
lowerCAmelCase_ : Tuple = 10_00
lowerCAmelCase_ : Optional[int] = "datasets/huggingface/label-files"
lowerCAmelCase_ : List[Any] = "imagenet-1k-id2label.json"
lowerCAmelCase_ : Tuple = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) , "r"))
lowerCAmelCase_ : Tuple = {int(SCREAMING_SNAKE_CASE_): v for k, v in idalabel.items()}
lowerCAmelCase_ : Union[str, Any] = idalabel
lowerCAmelCase_ : int = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase_ : Tuple = 3_84
lowerCAmelCase_ : Union[str, Any] = 15_36
lowerCAmelCase_ : Optional[int] = 6
elif "l16" in checkpoint_url:
lowerCAmelCase_ : Optional[Any] = 10_24
lowerCAmelCase_ : List[str] = 40_96
lowerCAmelCase_ : Tuple = 24
lowerCAmelCase_ : Dict = 16
lowerCAmelCase_ : Tuple = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase_ : List[str] = 4
elif "l7" in checkpoint_url:
lowerCAmelCase_ : Union[str, Any] = 7
lowerCAmelCase_ : Optional[Any] = 10_24
lowerCAmelCase_ : str = 40_96
lowerCAmelCase_ : Dict = 24
lowerCAmelCase_ : Tuple = 16
lowerCAmelCase_ : Tuple = 0.1
lowerCAmelCase_ : List[Any] = ViTMSNModel(SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : List[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu")["target_encoder"]
lowerCAmelCase_ : Tuple = ViTImageProcessor(size=config.image_size)
remove_projection_head(SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : Optional[Any] = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_)
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_)
model.load_state_dict(SCREAMING_SNAKE_CASE_)
model.eval()
lowerCAmelCase_ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase_ : List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_).raw)
lowerCAmelCase_ : Optional[int] = ViTImageProcessor(
size=config.image_size , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt")
# forward pass
torch.manual_seed(2)
lowerCAmelCase_ : Tuple = model(**SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : List[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase_ : int = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]])
elif "b16" in checkpoint_url:
lowerCAmelCase_ : Dict = torch.tensor([[14.2_889, -18.9_045, 11.7_281]])
elif "l16" in checkpoint_url:
lowerCAmelCase_ : Optional[Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]])
elif "b4" in checkpoint_url:
lowerCAmelCase_ : Any = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]])
else:
lowerCAmelCase_ : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]])
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4)
print(F'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(SCREAMING_SNAKE_CASE_)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_)
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
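# Illustrative usage note: the converter above is meant to be run as a
# command-line script; the file name below is hypothetical.
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small
#
# It downloads the MSN checkpoint, renames the keys to the Hugging Face ViT
# layout, checks a slice of the hidden states against reference values, and
# saves the model together with its image processor.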
| 700 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'microsoft/speecht5_tts'
UpperCamelCase_ = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
UpperCamelCase_ = 'text_reader'
UpperCamelCase_ = SpeechTaProcessor
UpperCamelCase_ = SpeechTaForTextToSpeech
UpperCamelCase_ = SpeechTaHifiGan
UpperCamelCase_ = ['text']
UpperCamelCase_ = ['audio']
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
if self.post_processor is None:
lowerCAmelCase_ : Any = "microsoft/speecht5_hifigan"
super().setup()
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Optional[int]=None ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Any = self.pre_processor(text=lowerCAmelCase__ ,return_tensors="pt" ,truncation=lowerCAmelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
lowerCAmelCase_ : str = load_dataset("Matthijs/cmu-arctic-xvectors" ,split="validation" )
lowerCAmelCase_ : List[Any] = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : str ) -> Any:
'''simple docstring'''
with torch.no_grad():
return self.post_processor(lowerCAmelCase__ ).cpu().detach()
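def _text_reader_demo():
    # Illustrative sketch, hedged: driving the tool above end to end. Assumes
    # the PipelineTool base class implements __call__ by chaining encode(),
    # forward() and decode(), and that the "microsoft/speecht5_tts" checkpoint
    # (plus the speaker-embedding dataset) can be downloaded.
    tool = __snake_case()
    waveform = tool("Hello, world.")
    return waveform.shape  # raw waveform tensor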
| 683 | 0 |
def UpperCamelCase ( snake_case__):
    assert snake_case__.isupper()
    answer = 0
    index = len(snake_case__) - 1
    power = 0
    while index >= 0:
        value = (ord(snake_case__[index]) - 64) * pow(26 , power)
        answer += value
        power += 1
        index -= 1
    return answer
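def _excel_title_demo():
    # Worked example of the base-26 conversion above:
    # "AB" -> (ord("A") - 64) * 26**1 + (ord("B") - 64) * 26**0 = 26 + 2 = 28.
    assert UpperCamelCase("A") == 1
    assert UpperCamelCase("AB") == 28
    assert UpperCamelCase("ZZ") == 702  # 26 * 26 + 26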
if __name__ == "__main__":
from doctest import testmod
testmod()
| 701 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_lowercase = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
_lowercase = None
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file.")
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions.")
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout).")
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer.")
parser.add_argument(
"--na-prob-thresh" , "-t" , type=snake_case__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=snake_case__ , help="Save precision-recall curves to directory.")
parser.add_argument("--verbose" , "-v" , action="store_true")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : str = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : Dict = bool(qa["answers"]["text"])
return qid_to_has_ans
def UpperCamelCase ( snake_case__):
def remove_articles(snake_case__):
return ARTICLES_REGEX.sub(" " , snake_case__)
def white_space_fix(snake_case__):
return " ".join(text.split())
def remove_punc(snake_case__):
lowerCAmelCase_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(snake_case__):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case__))))
def UpperCamelCase ( snake_case__):
if not s:
return []
return normalize_answer(snake_case__).split()
def UpperCamelCase ( snake_case__ , snake_case__):
return int(normalize_answer(snake_case__) == normalize_answer(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = get_tokens(snake_case__)
lowerCAmelCase_ : Union[str, Any] = get_tokens(snake_case__)
lowerCAmelCase_ : Any = collections.Counter(snake_case__) & collections.Counter(snake_case__)
lowerCAmelCase_ : Dict = sum(common.values())
if len(snake_case__) == 0 or len(snake_case__) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
lowerCAmelCase_ : List[Any] = 1.0 * num_same / len(snake_case__)
lowerCAmelCase_ : int = 1.0 * num_same / len(snake_case__)
lowerCAmelCase_ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
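def _token_f1_demo():
    # Worked example of the token-level F1 above, computed the same way with
    # collections.Counter. After normalization strips the article "the",
    # gold "the cat sat" vs pred "cat sat down" share 2 tokens.
    gold, pred = ["cat", "sat"], ["cat", "sat", "down"]
    num_same = sum((collections.Counter(gold) & collections.Counter(pred)).values())
    precision, recall = num_same / len(pred), num_same / len(gold)
    fa = 2 * precision * recall / (precision + recall)
    assert abs(fa - 0.8) < 1e-9  # P = 2/3, R = 1.0 -> F1 = 0.8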
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Tuple = {}
lowerCAmelCase_ : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : int = qa["id"]
lowerCAmelCase_ : Any = [t for t in qa["answers"]["text"] if normalize_answer(snake_case__)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowerCAmelCase_ : Any = [""]
if qid not in preds:
print(F'''Missing prediction for {qid}''')
continue
lowerCAmelCase_ : Tuple = preds[qid]
# Take max over all gold answers
lowerCAmelCase_ : Any = max(compute_exact(snake_case__ , snake_case__) for a in gold_answers)
lowerCAmelCase_ : Optional[Any] = max(compute_fa(snake_case__ , snake_case__) for a in gold_answers)
return exact_scores, fa_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = {}
for qid, s in scores.items():
lowerCAmelCase_ : List[Any] = na_probs[qid] > na_prob_thresh
if pred_na:
lowerCAmelCase_ : List[str] = float(not qid_to_has_ans[qid])
else:
lowerCAmelCase_ : Union[str, Any] = s
return new_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None):
if not qid_list:
lowerCAmelCase_ : Any = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(fa_scores.values()) / total),
("total", total),
])
else:
lowerCAmelCase_ : Tuple = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
("total", total),
])
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
for k in new_eval:
lowerCAmelCase_ : Union[str, Any] = new_eval[k]
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
plt.step(snake_case__ , snake_case__ , color="b" , alpha=0.2 , where="post")
plt.fill_between(snake_case__ , snake_case__ , step="post" , alpha=0.2 , color="b")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(snake_case__)
plt.savefig(snake_case__)
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
lowerCAmelCase_ : List[Any] = sorted(snake_case__ , key=lambda snake_case__: na_probs[k])
lowerCAmelCase_ : Dict = 0.0
lowerCAmelCase_ : int = 1.0
lowerCAmelCase_ : List[str] = 0.0
lowerCAmelCase_ : Tuple = [1.0]
lowerCAmelCase_ : Tuple = [0.0]
lowerCAmelCase_ : Dict = 0.0
for i, qid in enumerate(snake_case__):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCAmelCase_ : str = true_pos / float(i + 1)
lowerCAmelCase_ : Union[str, Any] = true_pos / float(snake_case__)
if i == len(snake_case__) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(snake_case__)
recalls.append(snake_case__)
if out_image:
plot_pr_curve(snake_case__ , snake_case__ , snake_case__ , snake_case__)
return {"ap": 100.0 * avg_prec}
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
if out_image_dir and not os.path.exists(snake_case__):
os.makedirs(snake_case__)
lowerCAmelCase_ : Any = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
lowerCAmelCase_ : Any = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_exact.png") , title="Precision-Recall curve for Exact Match score" , )
lowerCAmelCase_ : Dict = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_f1.png") , title="Precision-Recall curve for F1 score" , )
lowerCAmelCase_ : Dict = {k: float(snake_case__) for k, v in qid_to_has_ans.items()}
lowerCAmelCase_ : str = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_oracle.png") , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(snake_case__ , snake_case__ , "pr_exact")
merge_eval(snake_case__ , snake_case__ , "pr_f1")
merge_eval(snake_case__ , snake_case__ , "pr_oracle")
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
if not qid_list:
return
lowerCAmelCase_ : Optional[Any] = [na_probs[k] for k in qid_list]
lowerCAmelCase_ : Dict = np.ones_like(snake_case__) / float(len(snake_case__))
plt.hist(snake_case__ , weights=snake_case__ , bins=20 , range=(0.0, 1.0))
plt.xlabel("Model probability of no-answer")
plt.ylabel("Proportion of dataset")
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(snake_case__ , F'''na_prob_hist_{name}.png'''))
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
lowerCAmelCase_ : str = num_no_ans
lowerCAmelCase_ : List[str] = cur_score
lowerCAmelCase_ : List[Any] = 0.0
lowerCAmelCase_ : str = sorted(snake_case__ , key=lambda snake_case__: na_probs[k])
for i, qid in enumerate(snake_case__):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCAmelCase_ : Union[str, Any] = scores[qid]
else:
if preds[qid]:
lowerCAmelCase_ : List[Any] = -1
else:
lowerCAmelCase_ : List[str] = 0
cur_score += diff
if cur_score > best_score:
lowerCAmelCase_ : Optional[Any] = cur_score
lowerCAmelCase_ : Optional[int] = na_probs[qid]
return 100.0 * best_score / len(snake_case__), best_thresh
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Dict = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = best_exact
lowerCAmelCase_ : List[str] = exact_thresh
lowerCAmelCase_ : Any = best_fa
lowerCAmelCase_ : List[str] = fa_thresh
def UpperCamelCase ( ):
with open(OPTS.data_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
lowerCAmelCase_ : List[Any] = dataset_json["data"]
with open(OPTS.pred_file) as f:
lowerCAmelCase_ : int = json.load(snake_case__)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
else:
lowerCAmelCase_ : List[Any] = {k: 0.0 for k in preds}
lowerCAmelCase_ : Tuple = make_qid_to_has_ans(snake_case__) # maps qid to True/False
lowerCAmelCase_ : Any = [k for k, v in qid_to_has_ans.items() if v]
lowerCAmelCase_ : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
lowerCAmelCase_ , lowerCAmelCase_ : Dict = get_raw_scores(snake_case__ , snake_case__)
lowerCAmelCase_ : str = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Dict = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__)
if has_ans_qids:
lowerCAmelCase_ : str = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "HasAns")
if no_ans_qids:
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "NoAns")
if OPTS.na_prob_file:
find_all_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , OPTS.out_image_dir)
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "hasAns")
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "noAns")
if OPTS.out_file:
with open(OPTS.out_file , "w") as f:
json.dump(snake_case__ , snake_case__)
else:
print(json.dumps(snake_case__ , indent=2))
if __name__ == "__main__":
_lowercase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
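# Illustrative usage note: a typical invocation of the evaluation script above
# (file names are hypothetical):
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json --out-image-dir plots
#
# Exact-match and F1 are reported overall and split into HasAns/NoAns subsets;
# with --na-prob-file the best no-answer thresholds are searched as well.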
| 683 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = ["""image_processor""", """tokenizer"""]
UpperCamelCase_ = """BlipImageProcessor"""
UpperCamelCase_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Dict ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : str ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = False
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
lowerCAmelCase_ : List[Any] = self.image_processor
def __call__( self : str ,lowerCAmelCase__ : ImageInput = None ,lowerCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,lowerCAmelCase__ : bool = True ,lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False ,lowerCAmelCase__ : Union[bool, str, TruncationStrategy] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : int = 0 ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[bool] = None ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : bool = True ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCAmelCase__ : Tuple ,) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
lowerCAmelCase_ : List[Any] = self.tokenizer
lowerCAmelCase_ : List[str] = self.tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
return text_encoding
# add pixel_values
lowerCAmelCase_ : Dict = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ )
if text is not None:
lowerCAmelCase_ : str = self.tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
else:
lowerCAmelCase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase_ )
return encoding_image_processor
def UpperCAmelCase_ ( self : str ,*lowerCAmelCase__ : Optional[int] ,**lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def UpperCAmelCase_ ( self : int ,*lowerCAmelCase__ : Optional[Any] ,**lowerCAmelCase__ : str ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
@property
def UpperCAmelCase_ ( self : List[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : int = self.tokenizer.model_input_names
lowerCAmelCase_ : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
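def _blip_processor_demo():
    # Illustrative sketch: joint image+text preprocessing with the processor
    # above. The "Salesforce/blip-image-captioning-base" checkpoint is an
    # assumption -- any BLIP checkpoint shipping this processor should work.
    import requests
    from PIL import Image

    processor = __snake_case.from_pretrained("Salesforce/blip-image-captioning-base")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, text="a photo of", return_tensors="pt")
    return sorted(inputs.keys())  # pixel_values plus the tokenizer outputs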
| 702 |
from math import sqrt
def UpperCamelCase ( snake_case__):
    total = 0
    for i in range(1 , int(sqrt(snake_case__) + 1)):
        if snake_case__ % i == 0 and i != sqrt(snake_case__):
            total += i + snake_case__ // i
        elif i == sqrt(snake_case__):
            total += i
    return total - snake_case__
def UpperCamelCase ( snake_case__ = 1_00_00):
lowerCAmelCase_ : int = sum(
i
for i in range(1 , snake_case__)
if sum_of_divisors(sum_of_divisors(snake_case__)) == i and sum_of_divisors(snake_case__) != i)
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
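def _amicable_demo():
    # Worked example behind the solution above: 220 and 284 form the classic
    # amicable pair. The proper divisors of 220 sum to 284 and vice versa, so
    # d(d(220)) == 220 while d(220) != 220 -- exactly the predicate tested.
    d_220 = sum(i for i in range(1, 220) if 220 % i == 0)
    d_284 = sum(i for i in range(1, 284) if 284 % i == 0)
    assert (d_220, d_284) == (284, 220)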
| 683 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __snake_case :
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : Optional[Any]=99 ,lowerCAmelCase__ : Any=13 ,lowerCAmelCase__ : Union[str, Any]=16 ,lowerCAmelCase__ : Union[str, Any]=7 ,lowerCAmelCase__ : Optional[Any]=True ,lowerCAmelCase__ : Optional[int]=True ,lowerCAmelCase__ : List[Any]=True ,lowerCAmelCase__ : str=False ,lowerCAmelCase__ : Any=True ,lowerCAmelCase__ : Optional[Any]=2 ,lowerCAmelCase__ : Any=32 ,lowerCAmelCase__ : List[str]=4 ,lowerCAmelCase__ : int=4 ,lowerCAmelCase__ : Tuple=30 ,lowerCAmelCase__ : Any=0 ,lowerCAmelCase__ : int=1 ,lowerCAmelCase__ : Tuple=2 ,lowerCAmelCase__ : Union[str, Any]=None ,) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = parent
lowerCAmelCase_ : Optional[int] = batch_size
lowerCAmelCase_ : Optional[Any] = decoder_seq_length
# For common tests
lowerCAmelCase_ : str = self.decoder_seq_length
lowerCAmelCase_ : List[str] = is_training
lowerCAmelCase_ : Union[str, Any] = use_attention_mask
lowerCAmelCase_ : Tuple = use_labels
lowerCAmelCase_ : Dict = vocab_size
lowerCAmelCase_ : Tuple = d_model
lowerCAmelCase_ : List[Any] = d_model
lowerCAmelCase_ : Any = decoder_layers
lowerCAmelCase_ : List[str] = decoder_layers
lowerCAmelCase_ : Optional[int] = decoder_ffn_dim
lowerCAmelCase_ : List[str] = decoder_attention_heads
lowerCAmelCase_ : Tuple = decoder_attention_heads
lowerCAmelCase_ : int = eos_token_id
lowerCAmelCase_ : Union[str, Any] = bos_token_id
lowerCAmelCase_ : Tuple = pad_token_id
lowerCAmelCase_ : Dict = decoder_start_token_id
lowerCAmelCase_ : str = use_cache
lowerCAmelCase_ : List[str] = max_position_embeddings
lowerCAmelCase_ : str = None
lowerCAmelCase_ : Optional[int] = decoder_seq_length
lowerCAmelCase_ : Dict = 2
lowerCAmelCase_ : Optional[int] = 1
def UpperCAmelCase_ ( self : str ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
lowerCAmelCase_ : Optional[Any] = None
if self.use_attention_mask:
lowerCAmelCase_ : int = ids_tensor([self.batch_size, self.decoder_seq_length] ,vocab_size=2 )
lowerCAmelCase_ : Dict = None
if self.use_labels:
lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
lowerCAmelCase_ : Optional[Any] = TrOCRConfig(
vocab_size=self.vocab_size ,d_model=self.d_model ,decoder_layers=self.decoder_layers ,decoder_ffn_dim=self.decoder_ffn_dim ,decoder_attention_heads=self.decoder_attention_heads ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,use_cache=self.use_cache ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,max_position_embeddings=self.max_position_embeddings ,)
return (config, input_ids, attention_mask, lm_labels)
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[int] ,) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = True
lowerCAmelCase_ : Union[str, Any] = TrOCRDecoder(config=A_ ).to(A_ ).eval()
lowerCAmelCase_ : List[str] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
lowerCAmelCase_ : Union[str, Any] = model(A_ ,use_cache=A_ )
lowerCAmelCase_ : Dict = model(A_ )
lowerCAmelCase_ : List[Any] = model(A_ ,use_cache=A_ )
self.parent.assertTrue(len(A_ ) == len(A_ ) )
self.parent.assertTrue(len(A_ ) == len(A_ ) + 1 )
lowerCAmelCase_ : str = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ : List[str] = ids_tensor((2, 1) ,config.vocab_size - 1 ) + 1
# append to next input_ids and
lowerCAmelCase_ : Any = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowerCAmelCase_ : List[str] = model(A_ )["last_hidden_state"]
lowerCAmelCase_ : List[str] = model(A_ ,past_key_values=A_ )["last_hidden_state"]
# select random slice
lowerCAmelCase_ : Tuple = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowerCAmelCase_ : Optional[int] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
lowerCAmelCase_ : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(A_ ,A_ ,atol=1e-3 )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = config_and_inputs
lowerCAmelCase_ : List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class __snake_case ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
UpperCamelCase_ = (TrOCRForCausalLM,) if is_torch_available() else ()
UpperCamelCase_ = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
UpperCamelCase_ = True
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = TrOCRStandaloneDecoderModelTester(self ,is_training=A_ )
lowerCAmelCase_ : int = ConfigTester(self ,config_class=A_ )
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : str ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : int ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*A_ )
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
return
@unittest.skip("The model doesn\'t support left padding" ) # and it's not used enough to be worth fixing :)
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
pass
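def _trocr_greedy_generation_demo():
    # Illustrative sketch: standalone greedy generation with the causal LM
    # covered above, using a tiny randomly initialized config so no checkpoint
    # download is needed. The generated ids are meaningless; the point is the
    # generate() plumbing.
    config = TrOCRConfig(
        vocab_size=99, d_model=16, decoder_layers=2,
        decoder_attention_heads=4, decoder_ffn_dim=32,
    )
    model = TrOCRForCausalLM(config).eval()
    input_ids = torch.tensor([[config.bos_token_id]])
    out = model.generate(input_ids, max_length=5)
    return out.shape  # (1, <= 5)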
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_lowercase = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
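# Illustrative usage note: with the lazy structure above, importing the package
# stays cheap and heavy submodules load only on first attribute access, e.g.
#
#   from transformers.models.speech_to_text import Speech2TextConfig  # lightweight
#   from transformers.models.speech_to_text import Speech2TextModel   # pulls in torch
#
# When an optional backend (sentencepiece, torchaudio, tf, torch) is missing,
# the corresponding names are simply omitted from the lazy table instead of
# making the whole import fail.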
| 683 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
_lowercase = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    # `from_gh` is defined at module level in the `__main__` block below.
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 704 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
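def _demo_byte_helpers() -> None:
    # Quick sanity check for the two helpers above (illustrative only, not part
    # of the original module): the byte<->unicode table covers all 256 byte
    # values, and get_pairs returns the adjacent symbol pairs that BPE merge
    # ranks are looked up against.
    assert len(bytes_to_unicode()) == 256
    assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}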
class LongformerTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs ) -> None:
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab( self ) -> dict:
        '''simple docstring'''
        return dict(self.encoder, **self.added_tokens_encoder )
    def bpe( self, token ) -> str:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self, text ) -> list:
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat, text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self, token ) -> int:
        '''simple docstring'''
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self, index ) -> str:
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self, tokens ) -> str:
        '''simple docstring'''
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors )
        return text
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file, "w", encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file, "w", encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def prepare_for_tokenization( self, text, is_split_into_words=False, **kwargs ) -> tuple:
        '''simple docstring'''
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 683 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor ( SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ['input_features', 'attention_mask']
    def __init__( self, feature_size=80, sampling_rate=1_60_00, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs )
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features( self, waveform: np.ndarray ) -> np.ndarray:
        '''simple docstring'''
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform ).unsqueeze(0 )
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate )
        return features.numpy()
    @staticmethod
    def utterance_cmvn( x: np.ndarray, input_length: int, normalize_means: bool = True, normalize_vars: bool = True, padding_value: float = 0.0 ) -> np.ndarray:
        '''simple docstring'''
        if normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x, mean )
        if normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x, std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None ) -> List[np.ndarray]:
        '''simple docstring'''
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value )
            for x, n in zip(input_features, lengths )
        ]
    def __call__( self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, return_attention_mask: Optional[bool] = None, **kwargs ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float32 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform ) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features} )
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" )
        if isinstance(input_features[0], list ):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32 ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32 )
                if self._get_padding_strategies(padding, max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
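def _demo_feature_extractor() -> None:
    # Minimal usage sketch (illustrative, not part of the original module; it
    # assumes torchaudio is installed, since fbank extraction runs through the
    # Kaldi-compliance layer). A one-second 440 Hz tone at 16 kHz yields a
    # (1, num_frames, 80) log-mel feature array.
    extractor = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16_000, num_mel_bins=80 )
    waveform = np.sin(2 * np.pi * 440 * np.arange(16_000 ) / 16_000 ).astype(np.float32 )
    features = extractor(waveform, sampling_rate=16_000, return_tensors="np" )
    print(features["input_features"].shape )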
| 705 |
from collections.abc import Iterable
from typing import Any
class Node :
    """simple docstring"""
    def __init__( self, value: int | None = None ) -> None:
        '''simple docstring'''
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} ,indent=1 )
class BinarySearchTree :
    """simple docstring"""
    def __init__( self, root: Node | None = None ) -> None:
        '''simple docstring'''
        self.root = root
def __str__( self : Dict ) -> str:
'''simple docstring'''
return str(self.root )
    def __reassign_nodes( self, node: Node, new_children: Node | None ) -> None:
        '''simple docstring'''
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self, node: Node ) -> bool:
        '''simple docstring'''
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ) -> bool:
        '''simple docstring'''
        return self.root is None
    def __insert( self, value ) -> None:
        '''simple docstring'''
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self, *values ) -> None:
        '''simple docstring'''
        for value in values:
            self.__insert(value )
    def search( self, value ) -> Node | None:
        '''simple docstring'''
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another." )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self, node: Node | None = None ) -> Node | None:
        '''simple docstring'''
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self, node: Node | None = None ) -> Node | None:
        '''simple docstring'''
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self, value: int ) -> None:
        '''simple docstring'''
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self, node: Node | None ) -> Iterable:
        '''simple docstring'''
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self, traversal_function=None ) -> Any:
        '''simple docstring'''
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self, arr: list, node: Node | None ) -> None:
        '''simple docstring'''
        if node:
            self.inorder(arr, node.left )
            arr.append(node.value )
            self.inorder(arr, node.right )
    def find_kth_smallest( self, k: int, node: Node ) -> int:
        '''simple docstring'''
        arr: list[int] = []
        self.inorder(arr, node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree_example():
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
    print(t)
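def _demo_kth_smallest() -> None:
    # Short worked example (illustrative, not part of the original module): the
    # 3rd smallest element of {8, 3, 6, 1, 10} is 6, found via inorder traversal.
    tree = BinarySearchTree()
    tree.insert(8, 3, 6, 1, 10)
    assert tree.find_kth_smallest(3, tree.root) == 6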
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ['''VisionTextDualEncoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ['''FlaxVisionTextDualEncoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ['''TFVisionTextDualEncoderModel''']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 706 |
class RadixNode :
    """simple docstring"""
    def __init__( self, prefix: str = "", is_leaf: bool = False ) -> None:
        '''simple docstring'''
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self, word: str ) -> tuple[str, str, str]:
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix, word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self, words: list[str] ) -> None:
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self, word: str ) -> None:
        '''simple docstring'''
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self, word: str ) -> bool:
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self, word: str ) -> bool:
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self, height: int = 0 ) -> None:
        '''simple docstring'''
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "" )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def test_trie():
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def pytests():
    assert test_trie()
def main():
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
| 683 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_bert_from_pretrained( self ) -> None:
        '''simple docstring'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config, BertConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model, FlaxBertModel )
    @slow
    def test_roberta_from_pretrained( self ) -> None:
        '''simple docstring'''
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config, RobertaConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model, FlaxRobertaModel )
    @slow
    def test_bert_jax_jit( self ) -> None:
        '''simple docstring'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxBertModel.from_pretrained(model_name )
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    @slow
    def test_roberta_jax_jit( self ) -> None:
        '''simple docstring'''
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxRobertaModel.from_pretrained(model_name )
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    def test_repo_not_found( self ) -> None:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ):
            _ = FlaxAutoModel.from_pretrained("bert-base" )
    def test_revision_not_found( self ) -> None:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa" )
    def test_model_file_not_found( self ) -> None:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
    def test_model_from_pt_suggestion( self ) -> None:
        '''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model" ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 707 |
from __future__ import annotations
def carrier_concentration( electron_conc, hole_conc, intrinsic_conc ):
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor")
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor")
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor")
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
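def _demo_carrier_concentration() -> None:
    # Worked example (illustrative, not part of the original module): with
    # electron_conc = 25 and hole_conc = 100, the mass-action relation gives
    # intrinsic_conc = sqrt(25 * 100) = 50.
    assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)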
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 | 0 |
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 709 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
| 683 | 0 |
'''simple docstring'''
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def is_spain_national_id(spanish_id):
    if not isinstance(spanish_id, str):
        msg = F'''Expected string as input, found {type(spanish_id).__name__}'''
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
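def _demo_spanish_id() -> None:
    # Worked example (illustrative, not part of the original module):
    # 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so "12345678Z"
    # validates, with or without the optional dash.
    assert is_spain_national_id("12345678Z")
    assert is_spain_national_id("12345678-Z")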
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal):
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
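def _demo_hexadecimal() -> None:
    # Worked examples (illustrative, not part of the original module):
    # 255 -> "0xff", and the sign is carried through for negative inputs.
    assert decimal_to_hexadecimal(255) == "0xff"
    assert decimal_to_hexadecimal(-256) == "-0x100"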
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'perceiver'
    def __init__( self, num_latents=2_56, d_latents=12_80, d_model=7_68, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-1_2, use_query_residual=True, vocab_size=2_62, max_position_embeddings=20_48, image_size=56, train_size=[3_68, 4_96], num_frames=16, audio_samples_per_frame=19_20, samples_per_patch=16, output_shape=[1, 16, 2_24, 2_24], **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig ( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-4
    def generate_dummy_inputs( self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40 ) -> Mapping[str, Any]:
        '''simple docstring'''
        if isinstance(preprocessor, PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("input_ids" )
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width )
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("pixel_values" )
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 711 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((5_12, 5_12)))
        elif input_type == "audio":
            inputs.append(torch.ones(30_00))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(F'''Invalid type requested: {input_type}''')
    return inputs
def output_types(outputs):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(F'''Invalid output: {output}''')
    return output_types
@is_tool_test
class __snake_case :
"""simple docstring"""
    def test_inputs_outputs( self ) -> None:
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool, "inputs" ) )
        self.assertTrue(hasattr(self.tool, "outputs" ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def test_call( self ) -> None:
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ), self.tool.outputs )
    def test_common_attributes( self ) -> None:
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool, "description" ) )
        self.assertTrue(hasattr(self.tool, "default_checkpoint" ) )
        self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
    def test_agent_types_outputs( self ) -> None:
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs, list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ), len(self.tool.outputs ) )
        for output, output_type in zip(outputs, self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type ) )
    def test_agent_types_inputs( self ) -> None:
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs ):
            if isinstance(input_type, list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs, list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ), len(self.tool.outputs ) )
| 683 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted :
    """simple docstring"""
    def __init__( self ) -> None:
        '''simple docstring'''
        self.connections = {}
    def add_node( self, node: str ) -> None:
        '''simple docstring'''
        self.connections[node] = {}
    def add_transition_probability( self, node1: str, node2: str, probability: float ) -> None:
        '''simple docstring'''
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability
    def get_nodes( self ) -> list[str]:
        '''simple docstring'''
        return list(self.connections )
    def transition( self, node: str ) -> str:
        '''simple docstring'''
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start, transitions, steps):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
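def _demo_markov_chain() -> None:
    # Minimal usage sketch (illustrative values, not part of the original
    # module): a two-state chain where each state hops to the other with
    # probability 0.9, so after many steps both states are visited roughly
    # equally often.
    transitions = [("a", "b", 0.9), ("a", "a", 0.1), ("b", "a", 0.9), ("b", "b", 0.1)]
    print(get_transitions("a", transitions, 1_000))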
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 712 |
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / F'''{script_name}.py'''
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
| 683 | 0 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    # A valid IPv4 address is four dot-separated decimal octets, each in [0, 255].
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 713 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level tokenizers,
        # mostly because of the space that is prepended to the string.
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        sa = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        pa = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_sa = tokenizer(sa, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_pa = tokenizer(pa, padding=True, truncate=True, return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        sa = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_sa = tokenizer(sa)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_sa = tokenizer.batch_decode(out_sa.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    # tokenizer has no padding token by default
    def test_padding_different_model_input_name(self):
        pass
| 683 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = F'''repo_txt_data-{int(time.time() * 10e3)}'''
lowerCAmelCase_ : Optional[Any] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(lowerCAmelCase__ , token=lowerCAmelCase__ , repo_type="dataset" , private=lowerCAmelCase__)
hf_api.upload_file(
token=lowerCAmelCase__ , path_or_fileobj=str(lowerCAmelCase__) , path_in_repo="data/text_data.txt" , repo_id=lowerCAmelCase__ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase__ , token=lowerCAmelCase__ , repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Any = F'''repo_zipped_txt_data-{int(time.time() * 10e3)}'''
lowerCAmelCase_ : Any = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(lowerCAmelCase__ , token=lowerCAmelCase__ , repo_type="dataset" , private=lowerCAmelCase__)
hf_api.upload_file(
token=lowerCAmelCase__ , path_or_fileobj=str(lowerCAmelCase__) , path_in_repo="data.zip" , repo_id=lowerCAmelCase__ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase__ , token=lowerCAmelCase__ , repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : str = F'''repo_zipped_img_data-{int(time.time() * 10e3)}'''
lowerCAmelCase_ : List[Any] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(lowerCAmelCase__ , token=lowerCAmelCase__ , repo_type="dataset" , private=lowerCAmelCase__)
hf_api.upload_file(
token=lowerCAmelCase__ , path_or_fileobj=str(lowerCAmelCase__) , path_in_repo="data.zip" , repo_id=lowerCAmelCase__ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase__ , token=lowerCAmelCase__ , repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
return hf_private_dataset_repo_zipped_img_data_
| 714 |
from __future__ import annotations
from random import random
class Node:
    """Treap node: stores a value and a random heap priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Any ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} ,indent=1 )
def __str__( self : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = str(self.value ) + " "
lowerCAmelCase_ : List[Any] = str(self.left or "" )
lowerCAmelCase_ : Union[str, Any] = str(self.right or "" )
return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (left, right): the left part holds values < value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps whose value ranges are already ordered (left <= right)."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="UperNet does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
@unittest.skip(reason="UperNet does not have a base model" )
    def test_save_load_fast_init_from_base(self):
        pass
@unittest.skip(reason="UperNet does not have a base model" )
    def test_save_load_fast_init_to_base(self):
        pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def test_multi_gpu_data_parallel_forward(self):
        pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason="UperNet does not have tied weights" )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 715 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowercase = {f"funnel-transformer/{name}": 512 for name in _model_names}
_lowercase = {f"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>",
                 sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
                 bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True,
                 strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            bos_token=bos_token, eos_token=eos_token, clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 683 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Stochastic sampling pipeline following Karras et al. (2022)."""

    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
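
# Usage sketch (the checkpoint name below is hypothetical; any Karras-VE
# compatible UNet/scheduler pair registered with this pipeline would do):
#
#     pipe = KarrasVePipeline.from_pretrained("some/karras-ve-checkpoint")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]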
| 716 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested")
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested")
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested")
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment")
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate")
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 683 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCamelCase ( ):
raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest ( unittest.TestCase ):
"""simple docstring"""
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
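
# Note: `find_executable_batch_size` halves the batch size each time the wrapped
# function raises an out-of-memory error, which is why the tests above expect the
# sequence [128, 64, 32, 16, 8] when starting from 128.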
| 717 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # First pick the essential prime implicants: columns covered by exactly one row.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Then greedily take the implicant covering the most remaining columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
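
# Sketch of the flow: main() reads the number of variables and the minterms,
# decimal_to_binary() turns each minterm into a fixed-width bit string, check()
# repeatedly merges strings that differ in a single position (the merged position
# becomes "_") to obtain the prime implicants, and selection() extracts an
# essential cover from the prime-implicant chart.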
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8,
                 num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32,
                 relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6,
                 initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True,
                 use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=True,
                 pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id,
            eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
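
# Usage sketch: `UMT5Config()` instantiates the defaults above; keyword
# overrides such as `UMT5Config(d_model=1024, num_layers=24)` scale the
# architecture while keeping the remaining defaults.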
| 718 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
lowerCAmelCase_ : List[Any] = bnb_quantization_config.load_in_abit
lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed.")
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
if isinstance(snake_case__ , snake_case__) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
lowerCAmelCase_ : Optional[int] = load_in_abit
lowerCAmelCase_ : List[str] = load_in_abit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(snake_case__):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
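
# Illustrative sketch only (not the bitsandbytes kernels): the 8-bit idea is to
# store int8 weights plus a per-row absmax scale and dequantize on the fly.
# `NaiveInt8Linear` is a hypothetical name for this demo.
class NaiveInt8Linear(nn.Module):
    def __init__(self, linear: nn.Linear):
        super().__init__()
        weight = linear.weight.data
        # one absmax scale per output channel
        self.scale = weight.abs().amax(dim=1, keepdim=True) / 127.0
        self.weight_int8 = torch.round(weight / self.scale).to(torch.int8)
        self.bias = linear.bias

    def forward(self, x):
        dequantized = self.weight_int8.float() * self.scale  # back to float
        return nn.functional.linear(x, dequantized, self.bias)


_lin = nn.Linear(4, 3)
print((_lin(torch.ones(2, 4)) - NaiveInt8Linear(_lin)(torch.ones(2, 4))).abs().max())  # small quantization error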
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(snake_case__ , snake_case__):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
if isinstance(snake_case__ , snake_case__):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
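
# Toy sketch of what an "auto" device map computes: greedily assign top-level
# modules to devices under a per-device parameter budget, spilling the rest to
# "cpu". Budgets and device names here are made up for illustration.
def toy_device_map(model, budgets):
    device_map = {}
    devices = list(budgets)
    idx, used = 0, 0
    for name, module in model.named_children():
        n_params = sum(p.numel() for p in module.parameters())
        while idx < len(devices) and used + n_params > budgets[devices[idx]]:
            idx, used = idx + 1, 0
        device_map[name] = devices[idx] if idx < len(devices) else "cpu"
        used += n_params
    return device_map


print(toy_device_map(
    nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 8), nn.Linear(8, 2)),
    {"cuda:0": 100, "cuda:1": 100},
))  # {'0': 'cuda:0', '1': 'cuda:1', '2': 'cuda:1'}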
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ):
lowerCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase_ : Optional[int] = []
current_key_name.append(snake_case__)
if isinstance(snake_case__ , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase_ : Optional[int] = ".".join(snake_case__)
lowerCAmelCase_ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase_ : List[Any] = False
break
if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Tuple = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False")
lowerCAmelCase_ : List[str] = module.weight.data
if module.bias is not None:
lowerCAmelCase_ : Any = module.bias.data
bnb_module.requires_grad_(snake_case__)
setattr(snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = True
if len(list(module.children())) > 0:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
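
# Minimal framework-only version of the recursion above: walk the module tree
# and swap every nn.Linear for whatever a factory callable returns, skipping
# names in `skip`. This mirrors the traversal structure, not the bnb specifics.
def replace_linears(model, factory, skip=()):
    for name, child in model.named_children():
        if isinstance(child, nn.Linear) and name not in skip:
            setattr(model, name, factory(child))
        else:
            replace_linears(child, factory, skip)
    return model


_net = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
replace_linears(_net, lambda lin: nn.Linear(lin.in_features, lin.out_features))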
def UpperCamelCase ( snake_case__):
# Create a copy of the model
with init_empty_weights():
        lowerCAmelCase_ : List[Any] = deepcopy(snake_case__) # this has 0 cost since it is done inside `init_empty_weights` context manager
lowerCAmelCase_ : Dict = find_tied_parameters(snake_case__)
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
lowerCAmelCase_ : Optional[Any] = sum(snake_case__ , [])
lowerCAmelCase_ : List[Any] = len(snake_case__) > 0
# Check if it is a base model
lowerCAmelCase_ : List[str] = False
if hasattr(snake_case__ , "base_model_prefix"):
lowerCAmelCase_ : Tuple = not hasattr(snake_case__ , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase_ : Union[str, Any] = list(model.named_children())
lowerCAmelCase_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase_ : Any = set(snake_case__) - set(snake_case__)
lowerCAmelCase_ : Tuple = list(set(snake_case__)) + list(snake_case__)
# remove ".weight" from the keys
lowerCAmelCase_ : List[str] = [".weight", ".bias"]
lowerCAmelCase_ : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase_ : str = name.replace(snake_case__ , "")
filtered_module_names.append(snake_case__)
return filtered_module_names
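
# Why the output head is skipped: with weight tying, quantizing the tied
# lm_head would quantize the embedding too. `TinyLM` is a hypothetical model
# built only for this demonstration.
class TinyLM(nn.Module):
    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(10, 4)
        self.lm_head = nn.Linear(4, 10, bias=False)
        self.lm_head.weight = self.embed.weight  # weight tying


_m = TinyLM()
assert _m.lm_head.weight is _m.embed.weight  # tied -> keep lm_head unquantized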
def UpperCamelCase ( model):
    for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit):
            return True
    return False
def UpperCamelCase ( parameter):
    return next(parameter.parameters()).device
def UpperCamelCase ( model , param , param_name , new_dtype , offload_folder , offload_index , fpaa_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fpaa_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module , split)
                if new_module is None:
                    raise ValueError(F'''{module} has no attribute {split}.''')
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index)
        if hasattr(module._parameters[tensor_name] , "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB") , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index)
        offload_weight(fpaa_statistics , param_name.replace("weight" , "SCB") , offload_folder , index=offload_index)
    set_module_tensor_to_device(model , param_name , "meta" , dtype=new_dtype , value=torch.empty(*param.size()))
def nor_gate(input_a , input_b):
    return int(input_a == input_b == 0)


def main():
print("Truth Table of NOR Gate:")
print("| Input 1 | Input 2 | Output |")
print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''')
print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''')
print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''')
print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''')
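
# NOR is functionally complete: NOT, OR and AND can all be built from it.
# A quick self-check on top of nor_gate above:
def not_gate(input_a):
    return nor_gate(input_a, input_a)


def or_gate(input_a, input_b):
    return not_gate(nor_gate(input_a, input_b))


def and_gate(input_a, input_b):
    return nor_gate(not_gate(input_a), not_gate(input_b))


assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]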
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = ['input_features', 'is_longer']
def __init__( self : Optional[int] ,lowerCAmelCase__ : List[Any]=64 ,lowerCAmelCase__ : Any=4_80_00 ,lowerCAmelCase__ : Optional[Any]=4_80 ,lowerCAmelCase__ : List[str]=10 ,lowerCAmelCase__ : List[Any]=10_24 ,lowerCAmelCase__ : Union[str, Any]=0.0 ,lowerCAmelCase__ : Tuple=False ,lowerCAmelCase__ : float = 0 ,lowerCAmelCase__ : float = 1_40_00 ,lowerCAmelCase__ : int = None ,lowerCAmelCase__ : str = "fusion" ,lowerCAmelCase__ : str = "repeatpad" ,**lowerCAmelCase__ : Union[str, Any] ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Optional[Any] = top_db
lowerCAmelCase_ : str = truncation
lowerCAmelCase_ : Tuple = padding
lowerCAmelCase_ : str = fft_window_size
lowerCAmelCase_ : Dict = (fft_window_size >> 1) + 1
lowerCAmelCase_ : Dict = hop_length
lowerCAmelCase_ : Any = max_length_s
lowerCAmelCase_ : int = max_length_s * sampling_rate
lowerCAmelCase_ : Optional[int] = sampling_rate
lowerCAmelCase_ : int = frequency_min
lowerCAmelCase_ : Optional[Any] = frequency_max
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm=lowerCAmelCase__ ,mel_scale="htk" ,)
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm="slaney" ,mel_scale="slaney" ,)
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = spectrogram(
lowerCAmelCase__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCAmelCase__ ,log_mel="dB" ,)
return log_mel_spectrogram.T
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
# randomly choose index for each part
lowerCAmelCase_ : str = np.random.choice(ranges[0] )
lowerCAmelCase_ : Optional[Any] = np.random.choice(ranges[1] )
lowerCAmelCase_ : Any = np.random.choice(ranges[2] )
lowerCAmelCase_ : str = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase_ : List[str] = torch.tensor(mel[None, None, :] )
lowerCAmelCase_ : List[Any] = torch.nn.functional.interpolate(
lowerCAmelCase__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy()
lowerCAmelCase_ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase_ : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase_ : str = len(lowerCAmelCase__ ) - max_length
lowerCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 )
lowerCAmelCase_ : Dict = waveform[idx : idx + max_length]
lowerCAmelCase_ : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
                lowerCAmelCase_ : str = max_length // self.hop_length + 1  # the +1 relates to how the spectrogram is computed
lowerCAmelCase_ : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase_ : Dict = np.stack([mel, mel, mel, mel] ,axis=0 )
lowerCAmelCase_ : int = False
else:
lowerCAmelCase_ : str = self._random_mel_fusion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase_ : Dict = False
        # only use repeat as a new possible value for padding. You repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase_ : List[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : int = np.stack(np.tile(lowerCAmelCase__ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase_ : Optional[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = np.stack(np.tile(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = np.pad(lowerCAmelCase__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 )
if truncation == "fusion":
lowerCAmelCase_ : int = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
lowerCAmelCase_ : str = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : str = None ,lowerCAmelCase__ : Optional[str] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCAmelCase__ : List[Any] ,) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ : List[str] = truncation if truncation is not None else self.truncation
lowerCAmelCase_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : Dict = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : Dict = is_batched_numpy or (
isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : List[str] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCAmelCase_ : Tuple = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : Any = [np.asarray(lowerCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase_ : Optional[Any] = [
self._get_input_mel(lowerCAmelCase__ ,max_length if max_length else self.nb_max_samples ,lowerCAmelCase__ ,lowerCAmelCase__ )
for waveform in raw_speech
]
lowerCAmelCase_ : str = []
lowerCAmelCase_ : str = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase__ )
is_longer.append(lowerCAmelCase__ )
if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase_ : Any = np.random.randint(0 ,len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Dict = True
if isinstance(input_mel[0] ,lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase_ : List[Any] = [[longer] for longer in is_longer]
lowerCAmelCase_ : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
lowerCAmelCase_ : Dict = BatchFeature(lowerCAmelCase__ )
if return_tensors is not None:
lowerCAmelCase_ : List[str] = input_features.convert_to_tensors(lowerCAmelCase__ )
return input_features
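
# Rough numpy sketch of two preprocessing ideas above, outside the class.
# (1) log-mel extraction: STFT -> mel filter bank -> dB. The filter bank here
# is a random stand-in, not a real mel matrix (the extractor delegates to
# transformers' audio utilities). (2) "repeatpad": tile a short waveform,
# then zero-pad up to max_length.
sr, n_fft, hop = 16000, 400, 160
wave = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)
frames = np.lib.stride_tricks.sliding_window_view(wave, n_fft)[::hop]
spec = np.abs(np.fft.rfft(frames * np.hanning(n_fft), axis=1)) ** 2  # power spectrogram
mel_fb = np.random.rand(n_fft // 2 + 1, 64)  # stand-in for a mel filter bank
log_mel = 10 * np.log10(spec @ mel_fb + 1e-10)  # shape (num_frames, 64), in dB

short, max_length = np.arange(3, dtype=np.float32), 8
tiled = np.tile(short, max_length // len(short))
padded = np.pad(tiled, (0, max_length - len(tiled)))  # [0. 1. 2. 0. 1. 2. 0. 0.]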
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar('''T''')
class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node: stores the data, the parent pointer and the rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set containing data
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the representative of the set data belongs to (path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, nodea: DisjointSetTreeNode[T], nodeb: DisjointSetTreeNode[T]) -> None:
        # union by rank: attach the lower-rank tree under the higher-rank one
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1

    def union(self, dataa: T, datab: T) -> None:
        # merge the two disjoint sets containing dataa and datab
        self.link(self.find_set(dataa), self.find_set(datab))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbours (with edge weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node only if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, nodea: T, nodeb: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(nodea)
        self.add_node(nodeb)
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: generate a Minimum Spanning Tree of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
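
# Quick usage check for the classes above: build a small weighted graph and
# extract its minimum spanning tree.
g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(1, 3, 3)
g.add_edge(3, 4, 1)
print(g.kruskal().connections)  # keeps edges (1,2,1), (3,4,1) and (2,3,2)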
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
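
# For reference, the same sort without processes: alternate comparing
# (even, even+1) and (odd, odd+1) pairs; n phases suffice for n elements.
def odd_even_sort_sequential(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_sort_sequential([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]) == list(range(1, 11))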
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = MobileNetVaConfig(layer_norm_eps=0.001)
if "_quant" in model_name:
raise ValueError("Quantized models are not supported.")
lowerCAmelCase_ : Union[str, Any] = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , __UpperCamelCase)
if matches:
lowerCAmelCase_ : str = float(matches[1])
lowerCAmelCase_ : List[str] = int(matches[2])
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
lowerCAmelCase_ : str = 10_01
lowerCAmelCase_ : Optional[int] = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : Optional[int] = """huggingface/label-files"""
lowerCAmelCase_ : Optional[int] = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="dataset") , "r"))
lowerCAmelCase_ : Optional[Any] = {int(__UpperCamelCase) + 1: v for k, v in idalabel.items()}
lowerCAmelCase_ : Dict = """background"""
lowerCAmelCase_ : Any = idalabel
lowerCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase ( ):
lowerCAmelCase_ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : List[str] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase).raw)
return im
@torch.no_grad()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False):
lowerCAmelCase_ : List[Any] = get_mobilenet_va_config(__UpperCamelCase)
# Load 🤗 model
lowerCAmelCase_ : int = MobileNetVaForImageClassification(__UpperCamelCase).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
lowerCAmelCase_ : List[str] = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
lowerCAmelCase_ : Dict = image_processor(images=prepare_img() , return_tensors="pt")
lowerCAmelCase_ : Dict = model(**__UpperCamelCase)
lowerCAmelCase_ : List[Any] = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
lowerCAmelCase_ : Any = torch.tensor([-4.1_739, -1.1_233, 3.1_205])
elif model_name == "mobilenet_v1_0.75_192":
lowerCAmelCase_ : Any = torch.tensor([-3.9_440, -2.3_141, -0.3_333])
else:
lowerCAmelCase_ : Tuple = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1e-4)
Path(__UpperCamelCase).mkdir(exist_ok=__UpperCamelCase)
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(__UpperCamelCase)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(__UpperCamelCase)
if push_to_hub:
print("Pushing to the hub...")
lowerCAmelCase_ : int = """google/""" + model_name
image_processor.push_to_hub(__UpperCamelCase)
model.push_to_hub(__UpperCamelCase)
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowercase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
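
# The model-name regex used above, in isolation: it pulls the depth multiplier
# and input resolution out of names like "mobilenet_v1_0.75_192".
_match = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_0.75_192")
print(float(_match[1]), int(_match[2]))  # 0.75 192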
from typing import Any
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validation(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# Creates data structures and fill initial step
lowerCAmelCase_ : dict = {}
lowerCAmelCase_ : dict = {}
for state in states_space:
lowerCAmelCase_ : List[Any] = observations_space[0]
lowerCAmelCase_ : int = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCAmelCase_ : Dict = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case__)):
lowerCAmelCase_ : List[Any] = observations_space[o]
lowerCAmelCase_ : Optional[Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCAmelCase_ : List[Any] = ""
lowerCAmelCase_ : Tuple = -1
for k_state in states_space:
lowerCAmelCase_ : int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Optional[Any] = k_state
# Update probabilities and pointers dicts
lowerCAmelCase_ : Union[str, Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCAmelCase_ : Any = arg_max
# The final observation
lowerCAmelCase_ : List[Any] = observations_space[len(snake_case__) - 1]
# argmax for given final observation
lowerCAmelCase_ : List[str] = ""
lowerCAmelCase_ : List[str] = -1
for k_state in states_space:
lowerCAmelCase_ : List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Tuple = k_state
lowerCAmelCase_ : str = arg_max
# Process pointers backwards
lowerCAmelCase_ : int = last_state
lowerCAmelCase_ : int = []
for o in range(len(snake_case__) - 1 , -1 , -1):
result.append(snake_case__)
lowerCAmelCase_ : Optional[Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
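
# Worked example of the same dynamic program, using the classic two-state
# "Healthy/Fever" HMM and plain dicts as the tables above do:
_states = ("Healthy", "Fever")
_obs = ("normal", "cold", "dizzy")
_start = {"Healthy": 0.6, "Fever": 0.4}
_trans = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
          "Fever": {"Healthy": 0.4, "Fever": 0.6}}
_emit = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
_prob = {s: _start[s] * _emit[s][_obs[0]] for s in _states}
_path = {s: [s] for s in _states}
for _o in _obs[1:]:
    # both comprehensions read the previous step's tables before reassignment
    _prob, _path = (
        {s: max(_prob[k] * _trans[k][s] for k in _states) * _emit[s][_o] for s in _states},
        {s: _path[max(_states, key=lambda k: _prob[k] * _trans[k][s])] + [s] for s in _states},
    )
print(_path[max(_states, key=_prob.get)])  # ['Healthy', 'Healthy', 'Fever']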
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validate_not_empty(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
_validate_lists(snake_case__ , snake_case__)
_validate_dicts(
snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]):
raise ValueError("There's an empty parameter")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_list(snake_case__ , "observations_space")
_validate_list(snake_case__ , "states_space")
def UpperCamelCase ( snake_case__ , snake_case__):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list'''
raise ValueError(snake_case__)
else:
for x in _object:
if not isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , ):
_validate_dict(snake_case__ , "initial_probabilities" , snake_case__)
_validate_nested_dict(snake_case__ , "transition_probabilities")
_validate_nested_dict(snake_case__ , "emission_probabilities")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_dict(_object , snake_case__ , snake_case__)
for x in _object.values():
_validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : List[str] = F'''{var_name} must be a dict'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object):
lowerCAmelCase_ : Dict = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object.values()):
lowerCAmelCase_ : Union[str, Any] = "nested dictionary " if nested else ""
lowerCAmelCase_ : Any = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case__)
if __name__ == "__main__":
from doctest import testmod
testmod()
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __snake_case :
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *lowerCAmelCase__ : List[str] ,**lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
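
# hashimage fingerprints an image by hashing its raw pixel bytes, so identical
# pixel data always yields the same digest (guarded in case vision is missing):
if is_vision_available():
    _img = Image.new("RGB", (4, 4), color=(255, 0, 0))
    print(hashimage(_img))  # stable hex digest for this solid-red image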
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = DepthEstimationPipeline(model=A_ ,image_processor=A_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : str ,lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} ,A_ )
import datasets
lowerCAmelCase_ : Optional[Any] = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" ,"image" ,split="test" )
lowerCAmelCase_ : Any = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] ,A_ ,)
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : str = "Intel/dpt-large"
lowerCAmelCase_ : Any = pipeline("depth-estimation" ,model=A_ )
lowerCAmelCase_ : str = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
lowerCAmelCase_ : Union[str, Any] = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) ,29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) ,2.662 )
@require_torch
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT" )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'microsoft/speecht5_tts'
UpperCamelCase_ = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
UpperCamelCase_ = 'text_reader'
UpperCamelCase_ = SpeechTaProcessor
UpperCamelCase_ = SpeechTaForTextToSpeech
UpperCamelCase_ = SpeechTaHifiGan
UpperCamelCase_ = ['text']
UpperCamelCase_ = ['audio']
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
if self.post_processor is None:
lowerCAmelCase_ : Any = "microsoft/speecht5_hifigan"
super().setup()
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Optional[int]=None ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Any = self.pre_processor(text=lowerCAmelCase__ ,return_tensors="pt" ,truncation=lowerCAmelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
lowerCAmelCase_ : str = load_dataset("Matthijs/cmu-arctic-xvectors" ,split="validation" )
lowerCAmelCase_ : List[Any] = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : str ) -> Any:
'''simple docstring'''
with torch.no_grad():
return self.post_processor(lowerCAmelCase__ ).cpu().detach()
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order so the list ends up ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
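
# For contrast, a linear-time merge of two already-sorted sequences (the class
# above simply re-sorts the concatenation):
def merge_sorted(a, b):
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            i += 1
        else:
            out.append(b[j])
            j += 1
    return out + a[i:] + b[j:]


assert merge_sorted([1, 3, 5], [2, 4, 6]) == [1, 2, 3, 4, 5, 6]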
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
OPTS = None
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file.")
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions.")
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout).")
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer.")
parser.add_argument(
"--na-prob-thresh" , "-t" , type=snake_case__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=snake_case__ , help="Save precision-recall curves to directory.")
parser.add_argument("--verbose" , "-v" , action="store_true")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : str = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : Dict = bool(qa["answers"]["text"])
return qid_to_has_ans
def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
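
# normalize_answer in action: casing, punctuation, articles and extra
# whitespace are all stripped before comparison.
assert normalize_answer("The  Eiffel Tower!") == normalize_answer("eiffel tower")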
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
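
# Quick sanity check of the token-level F1 above: after normalization the gold
# bag is {cat, sat} and the prediction bag is {cat, sat, down}, so
# precision = 2/3, recall = 1 and F1 = 0.8.
assert abs(compute_fa("the cat sat", "a cat sat down") - 0.8) < 1e-9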
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Tuple = {}
lowerCAmelCase_ : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : int = qa["id"]
lowerCAmelCase_ : Any = [t for t in qa["answers"]["text"] if normalize_answer(snake_case__)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowerCAmelCase_ : Any = [""]
if qid not in preds:
print(F'''Missing prediction for {qid}''')
continue
lowerCAmelCase_ : Tuple = preds[qid]
# Take max over all gold answers
lowerCAmelCase_ : Any = max(compute_exact(snake_case__ , snake_case__) for a in gold_answers)
lowerCAmelCase_ : Optional[Any] = max(compute_fa(snake_case__ , snake_case__) for a in gold_answers)
return exact_scores, fa_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = {}
for qid, s in scores.items():
lowerCAmelCase_ : List[Any] = na_probs[qid] > na_prob_thresh
if pred_na:
lowerCAmelCase_ : List[str] = float(not qid_to_has_ans[qid])
else:
lowerCAmelCase_ : Union[str, Any] = s
return new_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None):
if not qid_list:
lowerCAmelCase_ : Any = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(fa_scores.values()) / total),
("total", total),
])
else:
lowerCAmelCase_ : Tuple = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
("total", total),
])
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
for k in new_eval:
lowerCAmelCase_ : Union[str, Any] = new_eval[k]
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
plt.step(snake_case__ , snake_case__ , color="b" , alpha=0.2 , where="post")
plt.fill_between(snake_case__ , snake_case__ , step="post" , alpha=0.2 , color="b")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(snake_case__)
plt.savefig(snake_case__)
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
lowerCAmelCase_ : List[Any] = sorted(snake_case__ , key=lambda snake_case__: na_probs[k])
lowerCAmelCase_ : Dict = 0.0
lowerCAmelCase_ : int = 1.0
lowerCAmelCase_ : List[str] = 0.0
lowerCAmelCase_ : Tuple = [1.0]
lowerCAmelCase_ : Tuple = [0.0]
lowerCAmelCase_ : Dict = 0.0
for i, qid in enumerate(snake_case__):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCAmelCase_ : str = true_pos / float(i + 1)
lowerCAmelCase_ : Union[str, Any] = true_pos / float(snake_case__)
if i == len(snake_case__) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(snake_case__)
recalls.append(snake_case__)
if out_image:
plot_pr_curve(snake_case__ , snake_case__ , snake_case__ , snake_case__)
return {"ap": 100.0 * avg_prec}
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
if out_image_dir and not os.path.exists(snake_case__):
os.makedirs(snake_case__)
lowerCAmelCase_ : Any = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
lowerCAmelCase_ : Any = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_exact.png") , title="Precision-Recall curve for Exact Match score" , )
lowerCAmelCase_ : Dict = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_f1.png") , title="Precision-Recall curve for F1 score" , )
lowerCAmelCase_ : Dict = {k: float(snake_case__) for k, v in qid_to_has_ans.items()}
lowerCAmelCase_ : str = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_oracle.png") , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(snake_case__ , snake_case__ , "pr_exact")
merge_eval(snake_case__ , snake_case__ , "pr_f1")
merge_eval(snake_case__ , snake_case__ , "pr_oracle")
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
if not qid_list:
return
lowerCAmelCase_ : Optional[Any] = [na_probs[k] for k in qid_list]
lowerCAmelCase_ : Dict = np.ones_like(snake_case__) / float(len(snake_case__))
plt.hist(snake_case__ , weights=snake_case__ , bins=20 , range=(0.0, 1.0))
plt.xlabel("Model probability of no-answer")
plt.ylabel("Proportion of dataset")
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(snake_case__ , F'''na_prob_hist_{name}.png'''))
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
lowerCAmelCase_ : str = num_no_ans
lowerCAmelCase_ : List[str] = cur_score
lowerCAmelCase_ : List[Any] = 0.0
lowerCAmelCase_ : str = sorted(snake_case__ , key=lambda snake_case__: na_probs[k])
for i, qid in enumerate(snake_case__):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCAmelCase_ : Union[str, Any] = scores[qid]
else:
if preds[qid]:
lowerCAmelCase_ : List[Any] = -1
else:
lowerCAmelCase_ : List[str] = 0
cur_score += diff
if cur_score > best_score:
lowerCAmelCase_ : Optional[Any] = cur_score
lowerCAmelCase_ : Optional[int] = na_probs[qid]
return 100.0 * best_score / len(snake_case__), best_thresh
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Dict = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = best_exact
lowerCAmelCase_ : List[str] = exact_thresh
lowerCAmelCase_ : Any = best_fa
lowerCAmelCase_ : List[str] = fa_thresh
def UpperCamelCase ( ):
with open(OPTS.data_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
lowerCAmelCase_ : List[Any] = dataset_json["data"]
with open(OPTS.pred_file) as f:
lowerCAmelCase_ : int = json.load(snake_case__)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
else:
lowerCAmelCase_ : List[Any] = {k: 0.0 for k in preds}
lowerCAmelCase_ : Tuple = make_qid_to_has_ans(snake_case__) # maps qid to True/False
lowerCAmelCase_ : Any = [k for k, v in qid_to_has_ans.items() if v]
lowerCAmelCase_ : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
lowerCAmelCase_ , lowerCAmelCase_ : Dict = get_raw_scores(snake_case__ , snake_case__)
lowerCAmelCase_ : str = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Dict = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__)
if has_ans_qids:
lowerCAmelCase_ : str = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "HasAns")
if no_ans_qids:
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "NoAns")
if OPTS.na_prob_file:
find_all_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , OPTS.out_image_dir)
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "hasAns")
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "noAns")
if OPTS.out_file:
with open(OPTS.out_file , "w") as f:
json.dump(snake_case__ , snake_case__)
else:
print(json.dumps(snake_case__ , indent=2))
if __name__ == "__main__":
_lowercase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __snake_case ( snake_case__ ):
"""simple docstring"""
    def __init__( self : Any ,df : pyspark.sql.DataFrame ,split : Optional[NamedSplit] = None ,features : Optional[Features] = None ,streaming : bool = True ,cache_dir : str = None ,keep_in_memory : bool = False ,working_dir : str = None ,load_from_cache_file : bool = True ,file_format : str = "arrow" ,**kwargs ,) -> None:
        '''simple docstring'''
        super().__init__(
            split=split ,features=features ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ,streaming=streaming ,**kwargs ,)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df ,features=features ,cache_dir=cache_dir ,working_dir=working_dir ,**kwargs ,)
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode ,file_format=self._file_format ,)
return self.builder.as_dataset(split=self.split )
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1 , int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 1_00_00) -> int:
    total = sum(
        i
        for i in range(1 , limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
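
# Spot check with the classic amicable pair: d(220) = 284 and d(284) = 220.
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220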
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_lowercase = logging.get_logger(__name__)
class __snake_case ( UpperCamelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] ,*lowerCAmelCase__ : List[Any] ,**lowerCAmelCase__ : str ) -> List[str]:
'''simple docstring'''
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." ,__A ,)
super().__init__(*__A ,**__A )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_speech_to_text'''] = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_speech_to_text'''] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_speech_to_text'''] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_speech_to_text'''] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
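# Note: with the lazy structure above, importing this package is cheap; accessing
# an attribute such as Speech2TextConfig is what triggers the real submodule
# import through _LazyModule.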
| 683 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = CodeGenTokenizer
UpperCamelCase_ = CodeGenTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = {'add_prefix_space': True}
UpperCamelCase_ = False
    def setUp ( self : List[Any] ) -> None:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowerCAmelCase_ : Union[str, Any] = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ : List[str] = {"unk_token": "<unk>"}
lowerCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
    def get_tokenizer ( self : int ,**kwargs : str ) -> Dict:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_rust_tokenizer ( self : int ,**kwargs : List[str] ) -> Tuple:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts ( self : Dict ,tokenizer : Any ) -> Any:
        '''simple docstring'''
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Any = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
lowerCAmelCase_ : int = "lower newer"
lowerCAmelCase_ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase_ : Tuple = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = tokens + [tokenizer.unk_token]
lowerCAmelCase_ : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = "lower newer"
# Testing tokenization
lowerCAmelCase_ : Any = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids without special tokens
lowerCAmelCase_ : List[str] = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = rust_tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids with special tokens
lowerCAmelCase_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = tokenizer.encode(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing the unknown token
lowerCAmelCase_ : Any = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase_ : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
    def UpperCAmelCase_ ( self : Any ,*args : Tuple ,**kwargs : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Optional[int]=15 ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
# Simple input
lowerCAmelCase_ : int = "This is a simple input"
lowerCAmelCase_ : List[Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : Union[str, Any] = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : int = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token="<pad>" )
# Simple input
lowerCAmelCase_ : Union[str, Any] = "This is a simple input"
lowerCAmelCase_ : Tuple = ["This is a simple input looooooooong", "This is a simple input"]
lowerCAmelCase_ : Optional[int] = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : List[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCAmelCase_ : List[str] = tokenizer.pad_token_id
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,padding="max_length" ,max_length=30 ,return_tensors="np" )
lowerCAmelCase_ : Dict = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
lowerCAmelCase_ : Tuple = tokenizer(*lowerCAmelCase__ ,padding="max_length" ,max_length=60 ,return_tensors="np" )
lowerCAmelCase_ : Dict = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = "$$$"
lowerCAmelCase_ : str = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowerCAmelCase__ ,add_bos_token=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = "This is a simple input"
lowerCAmelCase_ : List[Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : Optional[Any] = tokenizer.bos_token_id
lowerCAmelCase_ : Dict = tokenizer(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = tokenizer(lowerCAmelCase__ )
self.assertEqual(out_s.input_ids[0] ,lowerCAmelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase_ : Tuple = tokenizer.decode(out_s.input_ids )
lowerCAmelCase_ : Tuple = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowerCAmelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Dict = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowerCAmelCase_ : List[str] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
lowerCAmelCase_ : Optional[int] = "\nif len_a > len_b: result = a\nelse: result = b"
lowerCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
lowerCAmelCase_ : Dict = tokenizer.decode(lowerCAmelCase__ ,truncate_before_pattern=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
| 704 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode ( ):
    bs = (
        list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def get_pairs ( word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __snake_case ( PreTrainedTokenizer ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
    def __init__( self ,vocab_file ,merges_file ,errors="replace" ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,add_prefix_space=False ,**kwargs ,) -> None:
        '''simple docstring'''
        bos_token = AddedToken(bos_token ,lstrip=False ,rstrip=False ) if isinstance(bos_token ,str ) else bos_token
        eos_token = AddedToken(eos_token ,lstrip=False ,rstrip=False ) if isinstance(eos_token ,str ) else eos_token
        sep_token = AddedToken(sep_token ,lstrip=False ,rstrip=False ) if isinstance(sep_token ,str ) else sep_token
        cls_token = AddedToken(cls_token ,lstrip=False ,rstrip=False ) if isinstance(cls_token ,str ) else cls_token
        unk_token = AddedToken(unk_token ,lstrip=False ,rstrip=False ) if isinstance(unk_token ,str ) else unk_token
        pad_token = AddedToken(pad_token ,lstrip=False ,rstrip=False ) if isinstance(pad_token ,str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        super().__init__(
            errors=errors ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,cls_token=cls_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,**kwargs ,)
        with open(vocab_file ,encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file ,encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges ,range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def vocab_size ( self ):
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab ( self ):
        '''simple docstring'''
        return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe ( self ,token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs ,key=lambda pair : self.bpe_ranks.get(pair ,float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first ,i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize ( self ,text ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat ,text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id ( self ,token ):
        '''simple docstring'''
        return self.encoder.get(token ,self.encoder.get(self.unk_token ) )
    def _convert_id_to_token ( self ,index ):
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string ( self ,tokens ):
        '''simple docstring'''
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
        return text
    def save_vocabulary ( self ,save_directory: str ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file ,"w" ,encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file ,"w" ,encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens ( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask ( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences ( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization ( self ,text ,is_split_into_words=False ,**kwargs ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
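# Minimal usage sketch (assumes `vocab.json`/`merges.txt` files in the GPT-2
# byte-level BPE format are available locally; illustrative only):
#
#   tok = __snake_case("vocab.json", "merges.txt")
#   tokens = tok._tokenize("Hello world")            # byte-level BPE pieces
#   ids = [tok._convert_token_to_id(t) for t in tokens]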
| 683 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_lowercase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 512,
'''squeezebert/squeezebert-mnli''': 512,
'''squeezebert/squeezebert-mnli-headless''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class __snake_case ( PreTrainedTokenizerFast ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = SqueezeBertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" ,do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" ,strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens ( self ,token_ids_0 ,token_ids_1=None ) -> Union[str, Any]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences ( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary ( self ,save_directory: str ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
| 705 |
from collections.abc import Iterable
from typing import Any
class Node :
    """simple docstring"""
    def __init__( self ,value: int | None = None ) -> None:
        '''simple docstring'''
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} ,indent=1 )
class BinarySearchTree :
    """simple docstring"""
    def __init__( self ,root: Node | None = None ) -> None:
        '''simple docstring'''
        self.root = root
def __str__( self : Dict ) -> str:
'''simple docstring'''
return str(self.root )
    def __reassign_nodes ( self ,node: Node ,new_children: Node | None ) -> None:
        '''simple docstring'''
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right ( self ,node: Node ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
    def empty ( self ) -> bool:
'''simple docstring'''
return self.root is None
    def __insert ( self ,value ) -> None:
        '''simple docstring'''
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert ( self ,*values ) -> None:
        '''simple docstring'''
        for value in values:
            self.__insert(value )
    def search ( self ,value ) -> Node | None:
        '''simple docstring'''
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another." )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max ( self ,node: Node | None = None ) -> Node | None:
        '''simple docstring'''
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min ( self ,node: Node | None = None ) -> Node | None:
        '''simple docstring'''
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove ( self ,value: int ) -> None:
        '''simple docstring'''
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node ,None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node ,node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node ,node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse ( self ,node: Node | None ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
    def traversal_tree ( self ,traversal_function=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
    def inorder ( self ,arr: list ,node: Node | None ) -> None:
        '''simple docstring'''
        if node:
            self.inorder(arr ,node.left )
            arr.append(node.value )
            self.inorder(arr ,node.right )
    def find_kth_smallest ( self ,k: int ,node: Node ) -> int:
        '''simple docstring'''
        arr: list[int] = []
        self.inorder(arr ,node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder ( curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def main ( ):
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: " , t.get_max().value)  # type: ignore
        print("Min Value: " , t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
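# Example for the k-th smallest helper (values from the demo in main()):
#
#   t = BinarySearchTree()
#   t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
#   t.find_kth_smallest(3, t.root)  # -> 4, since the inorder walk is 1, 3, 4, 6, ...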
| 683 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = XLMProphetNetTokenizer
UpperCamelCase_ = False
UpperCamelCase_ = True
    def setUp ( self : Optional[int] ) -> None:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB ,keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self : Any ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Tuple = """[PAD]"""
lowerCAmelCase_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) ,a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) ,a_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"[PAD]" )
self.assertEqual(vocab_keys[1] ,"[CLS]" )
self.assertEqual(vocab_keys[-1] ,"j" )
        self.assertEqual(len(vocab_keys ) ,10_12 )
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,10_12 )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens ,["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
lowerCAmelCase_ : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a_ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] ,)
@cached_property
    def big_tokenizer ( self : Tuple ) -> Dict:
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : int = """Hello World!"""
lowerCAmelCase_ : Optional[int] = [3_53_89, 66_72, 49, 2]
self.assertListEqual(a_ ,self.big_tokenizer.encode(a_ ) )
@slow
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = {"""input_ids""": [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ ,model_name="microsoft/xprophetnet-large-wiki100-cased" ,revision="1acad1643ddd54a44df6a1b797ada8373685d90e" ,)
| 706 |
class RadixNode :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : str = "" ,lowerCAmelCase__ : bool = False ) -> None:
'''simple docstring'''
lowerCAmelCase_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase_ : int = is_leaf
lowerCAmelCase_ : Optional[Any] = prefix
    def match ( self ,word: str ) -> tuple[str, str, str]:
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix ,word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many ( self ,words: list[str] ) -> None:
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert ( self ,word: str ) -> None:
        '''simple docstring'''
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word ,is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string ,False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find ( self ,word: str ) -> bool:
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0] ,None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete ( self ,word: str ) -> bool:
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0] ,None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
            return True
    def print_tree ( self ,height: int = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height ,self.prefix ," (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie ( ):
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
assert not root.find("bandanas")
assert not root.find("apps")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def pytests ( ):
    assert test_trie()
def main ( ):
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words )
    print("Words:" , words )
print("Tree:")
root.print_tree()
if __name__ == "__main__":
main()
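# Lookups cost O(len(word)) thanks to prefix compression, e.g. (sketch):
#
#   root = RadixNode()
#   root.insert_many(["test", "team"])
#   root.find("team")  # True: follows the shared "te" edge, then the "am" edge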
| 683 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = StableDiffusionXLImgaImgPipeline
UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,attention_head_dim=(2, 4) ,use_linear_projection=__lowerCAmelCase ,addition_embed_type="text_time" ,addition_time_embed_dim=8 ,transformer_layers_per_block=(1, 2) ,projection_class_embeddings_input_dim=80 ,cross_attention_dim=64 ,)
lowerCAmelCase_ : List[Any] = EulerDiscreteScheduler(
beta_start=0.00_085 ,beta_end=0.012 ,steps_offset=1 ,beta_schedule="scaled_linear" ,timestep_spacing="leading" ,)
torch.manual_seed(0 )
lowerCAmelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
lowerCAmelCase_ : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=32 ,)
lowerCAmelCase_ : str = CLIPTextModel(__lowerCAmelCase )
lowerCAmelCase_ : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ,local_files_only=__lowerCAmelCase )
lowerCAmelCase_ : Any = CLIPTextModelWithProjection(__lowerCAmelCase )
lowerCAmelCase_ : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ,local_files_only=__lowerCAmelCase )
lowerCAmelCase_ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs ( self ,device ,seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
def UpperCAmelCase_ ( self : str ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : str = self.get_dummy_components()
lowerCAmelCase_ : Any = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
lowerCAmelCase_ : int = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowerCAmelCase_ : Tuple = self.get_dummy_inputs(__lowerCAmelCase )
lowerCAmelCase_ : Union[str, Any] = sd_pipe(**__lowerCAmelCase ).images
lowerCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ : Union[str, Any] = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCAmelCase_ ( self : Dict ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : str = self.get_dummy_components()
lowerCAmelCase_ : Optional[int] = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
lowerCAmelCase_ : Optional[int] = sd_pipe.to(__lowerCAmelCase )
lowerCAmelCase_ : List[Any] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# forward without prompt embeds
lowerCAmelCase_ : List[Any] = self.get_dummy_inputs(__lowerCAmelCase )
lowerCAmelCase_ : Dict = 3 * ["this is a negative prompt"]
lowerCAmelCase_ : List[Any] = negative_prompt
lowerCAmelCase_ : Tuple = 3 * [inputs["prompt"]]
lowerCAmelCase_ : Tuple = sd_pipe(**__lowerCAmelCase )
lowerCAmelCase_ : Dict = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCAmelCase_ : str = self.get_dummy_inputs(__lowerCAmelCase )
lowerCAmelCase_ : List[Any] = 3 * ["this is a negative prompt"]
lowerCAmelCase_ : Tuple = 3 * [inputs.pop("prompt" )]
(
(
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) ,
) : Any = sd_pipe.encode_prompt(__lowerCAmelCase ,negative_prompt=__lowerCAmelCase )
lowerCAmelCase_ : Any = sd_pipe(
**__lowerCAmelCase ,prompt_embeds=__lowerCAmelCase ,negative_prompt_embeds=__lowerCAmelCase ,pooled_prompt_embeds=__lowerCAmelCase ,negative_pooled_prompt_embeds=__lowerCAmelCase ,)
lowerCAmelCase_ : str = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
    def tearDown ( self : List[Any] ) -> None:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Any="cpu" ,lowerCAmelCase__ : Optional[Any]=torch.floataa ,lowerCAmelCase__ : Optional[Any]=0 ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowerCAmelCase_ : Optional[Any] = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ : Optional[Any] = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
lowerCAmelCase_ : Optional[int] = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowerCAmelCase_ : List[str] = self.get_inputs(__lowerCAmelCase )
lowerCAmelCase_ : str = pipe(**__lowerCAmelCase ).images
lowerCAmelCase_ : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase_ : str = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 707 |
from __future__ import annotations
def UpperCamelCase ( electron_conc , hole_conc , intrinsic_conc , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor")
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor")
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor")
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
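# Quick checks of the mass-action law (n * p = n_i**2) used above:
#
#   UpperCamelCase(electron_conc=25, hole_conc=100, intrinsic_conc=0)    # ('intrinsic_conc', 50.0)
#   UpperCamelCase(electron_conc=0, hole_conc=1600, intrinsic_conc=200)  # ('electron_conc', 25.0)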
| 683 | 0 |
def UpperCamelCase ( snake_case__ , snake_case__ = " "):
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : int = 0
for index, char in enumerate(UpperCAmelCase__):
if char == separator:
split_words.append(string[last_index:index])
lowerCAmelCase_ : List[str] = index + 1
elif index + 1 == len(UpperCAmelCase__):
split_words.append(string[last_index : index + 1])
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
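# Behaviour note: unlike str.split, the empty piece after a trailing separator
# is dropped:
#
#   UpperCamelCase("a,b,c", ",")  # ['a', 'b', 'c']
#   UpperCamelCase("a,b,", ",")   # ['a', 'b']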
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_git'''] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
zh_code = '''>>zh<<'''
ORG_NAME = '''Helsinki-NLP/'''
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
@require_sentencepiece
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ : List[Any] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
lowerCAmelCase_ : Dict = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Dict = Path(self.tmpdirname )
save_json(lowerCAmelCase__ ,save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(lowerCAmelCase__ ,save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCAmelCase__ ,save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(lowerCAmelCase__ ,save_dir / VOCAB_FILES_NAMES["target_spm"] )
lowerCAmelCase_ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
    def UpperCAmelCase_ ( self, **kwargs ) -> MarianTokenizer:
        '''simple docstring'''
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def UpperCAmelCase_ ( self : str ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = "</s>"
lowerCAmelCase_ : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) ,lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : int ) -> List[str]:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)
def UpperCAmelCase_ ( self : Tuple ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,9 )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
'''simple docstring'''
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'''{ORG_NAME}opus-mt-en-de''')
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 1_21, 14, 6_97, 3_88_48, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 10_00, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 5_12))
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
'''simple docstring'''
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ ,model_name="Helsinki-NLP/opus-mt-en-de" ,revision="1a8c2263da11e68e50938f97e10cd57820bd504c" ,decode_kwargs={"use_source_tokenizer": True} ,)
def UpperCAmelCase_ ( self : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : str = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
lowerCAmelCase_ : Union[str, Any] = "Tämä on testi"
lowerCAmelCase_ : Any = "This is a test"
lowerCAmelCase_ : Optional[Any] = [76, 7, 20_47, 2]
lowerCAmelCase_ : Dict = [69, 12, 11, 9_40, 2]
lowerCAmelCase_ : int = tokenizer(lowerCAmelCase__ ).input_ids
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : int = tokenizer(text_target=lowerCAmelCase__ ).input_ids
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : str = tokenizer.decode(lowerCAmelCase__ ,skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
| 709 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
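# Illustrative invocation (flag names assumed from TensorFlowBenchmarkArguments):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128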
if __name__ == "__main__":
main()
| 683 | 0 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums, target):
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
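
# Note (illustrative): the two-pointer scan assumes `nums` is sorted in ascending
# order; two_pointer([2, 7, 11, 15], 9) returns [0, 1] because 2 + 7 == 9.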
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 710 |
_lowercase = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def UpperCamelCase ( decimal ):
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
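
# Examples (illustrative): UpperCamelCase(5) == '0x5' and UpperCamelCase(-256) == '-0x100';
# an input of 0 yields the bare prefix '0x' because the while loop never executes.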
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
def UpperCamelCase ( a , b ):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
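
# Example (illustrative): UpperCamelCase(5, 3) returns '0b001', i.e. 101 AND 011
# after zero-padding both operands to the longer bit width.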
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['''text''', '''image''', '''audio''']
def create_inputs(input_types):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((5_12, 5_12)))
        elif input_type == "audio":
            inputs.append(torch.ones(30_00))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(F'''Invalid type requested: {input_type}''')
    return inputs
def output_types(outputs):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(F'''Invalid output: {output}''')
    return output_types
@is_tool_test
class __snake_case :
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"inputs" ) )
self.assertTrue(hasattr(self.tool ,"outputs" ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)
def UpperCAmelCase_ ( self : int ) -> Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"description" ) )
self.assertTrue(hasattr(self.tool ,"default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 683 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class __snake_case ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'data2vec-text'
    def __init__( self, vocab_size=3_05_22, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-1_2, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 712 |
import pytest
_lowercase = '''__dummy_dataset1__'''
_lowercase = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def UpperCamelCase ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCamelCase ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : List[Any] = dataset_loading_script_name
lowerCAmelCase_ : List[str] = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=snake_case__)
lowerCAmelCase_ : List[Any] = script_dir / F'''{script_name}.py'''
with open(snake_case__ , "w") as f:
f.write(snake_case__)
return str(snake_case__)
| 683 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
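
# Note (illustrative): in the tests below, `replicate` copies the model parameters
# to every device and `shard` splits the prompt batch across devices, which is the
# input layout expected by the pmapped (jit=True) pipeline call.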
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16)
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 2_53:2_56, 2_53:2_56, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : str = "stabilityai/stable-diffusion-2"
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(_SCREAMING_SNAKE_CASE ,subfolder="scheduler" )
lowerCAmelCase_ , lowerCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE ,scheduler=_SCREAMING_SNAKE_CASE ,revision="bf16" ,dtype=jnp.bfloataa ,)
lowerCAmelCase_ : Union[str, Any] = scheduler_params
lowerCAmelCase_ : Any = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : str = jax.device_count()
lowerCAmelCase_ : Optional[Any] = num_samples * [prompt]
lowerCAmelCase_ : Tuple = sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Optional[Any] = replicate(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : str = shard(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Dict = jax.random.PRNGKey(0 )
lowerCAmelCase_ : List[str] = jax.random.split(_SCREAMING_SNAKE_CASE ,jax.device_count() )
lowerCAmelCase_ : int = sd_pipe(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,num_inference_steps=25 ,jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
lowerCAmelCase_ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase_ : Union[str, Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCAmelCase_ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase_ : List[str] = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 713 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
def UpperCAmelCase_ ( self : str ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
    def UpperCAmelCase_ ( self, **kwargs ) -> int:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def UpperCAmelCase_ ( self, **kwargs ) -> Tuple:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = "lower newer"
lowerCAmelCase_ : Tuple = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def UpperCAmelCase_ ( self, *args, **kwargs ) -> List[str]:
'''simple docstring'''
pass
    def UpperCAmelCase_ ( self, max_length=15 ) -> str:
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                sa = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                pa = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, sa, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, pa, max_length=max_length, padding="max_length")
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        sa = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        pa = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_sa = tokenizer(sa, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_pa = tokenizer(pa, padding=True, truncate=True, return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = "$$$"
lowerCAmelCase_ : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowerCAmelCase__ ,add_bos_token=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : int = tokenizer.bos_token_id
lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ )
self.assertEqual(out_s.input_ids[0] ,lowerCAmelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase_ : List[str] = tokenizer.decode(out_s.input_ids )
lowerCAmelCase_ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowerCAmelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowerCAmelCase_ : str = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
lowerCAmelCase_ : int = "\nif len_a > len_b: result = a\nelse: result = b"
lowerCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.decode(lowerCAmelCase__ ,truncate_before_pattern=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
| 683 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 714 |
from __future__ import annotations
from random import random
class __snake_case :
"""simple docstring"""
    def __init__( self, value : int | None = None ) -> None:
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Any ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} ,indent=1 )
def __str__( self : str ) -> str:
'''simple docstring'''
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
return value + left + right
def split(root, value):
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left, right):
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root, value):
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root, value):
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
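
# Illustrative usage (assuming the functions above): starting from root = None,
# root = insert(root, 5); root = insert(root, 3); root = insert(root, 8)
# builds a treap, and inorder(root) then prints "3,5,8,".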
def inorder(root):
if not root: # None
return
else:
inorder(root.left)
print(root.value , end=",")
inorder(root.right)
def interact_treap(root, args):
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main():
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig ( PretrainedConfig ):
"""simple docstring"""
UpperCamelCase_ = """align_text_model"""
    def __init__( self, vocab_size=3_05_22, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-1_2, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
@classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig ( PretrainedConfig ):
"""simple docstring"""
UpperCamelCase_ = """align_vision_model"""
    def __init__( self, num_channels = 3, image_size = 6_00, width_coefficient = 2.0, depth_coefficient = 3.1, depth_divisor = 8, kernel_sizes = [3, 3, 5, 3, 5, 5, 3], in_channels = [32, 16, 24, 40, 80, 1_12, 1_92], out_channels = [16, 24, 40, 80, 1_12, 1_92, 3_20], depthwise_padding = [], strides = [1, 2, 2, 2, 1, 2, 1], num_block_repeats = [1, 2, 2, 3, 3, 4, 1], expand_ratios = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio = 0.25, hidden_act = "swish", hidden_dim = 25_60, pooling_type = "mean", initializer_range = 0.02, batch_norm_eps = 0.001, batch_norm_momentum = 0.99, drop_connect_rate = 0.2, **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
@classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig ( PretrainedConfig ):
"""simple docstring"""
UpperCamelCase_ = """align"""
    is_composition = True
    def __init__( self, text_config=None, vision_config=None, projection_dim=6_40, temperature_init_value=1.0, initializer_range=0.02, **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
@classmethod
    def from_text_vision_configs( cls, text_config : AlignTextConfig, vision_config : AlignVisionConfig, **kwargs ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict( self ) -> dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
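
# Illustrative usage (assuming the classes above): the sub-configs can be combined with
#   config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())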
| 715 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_lowercase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowercase = {f"funnel-transformer/{name}": 512 for name in _model_names}
_lowercase = {f"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class __snake_case ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def UpperCAmelCase_ ( self, token_ids_a, token_ids_b=None ) -> List[Any]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def UpperCAmelCase_ ( self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
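
    # Example (illustrative): for a pair (A, B) the returned mask is
    # [2] + [0] * (len(A) + 1) + [1] * (len(B) + 1), since Funnel assigns
    # token type id 2 to the [CLS] token.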
    def UpperCAmelCase_ ( self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 683 | 0 |
def UpperCamelCase ( num ):
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 716 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowercase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested")
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested")
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested")
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment")
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate")
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    """simple docstring"""
    def check_output( self, want, got, optionflags ) -> Any:
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
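# Example (illustrative): once registered, a doctest can opt out of output checking
# with the new flag, e.g.  >>> noisy_call()  # doctest: +IGNORE_RESULT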
| 683 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class __snake_case ( PretrainedConfig ):
"""simple docstring"""
UpperCamelCase_ = """gpt_neox_japanese"""
    def __init__( self, vocab_size=3_20_00, hidden_size=25_60, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=1_00_00, max_position_embeddings=20_48, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=3_19_96, eos_token_id=3_19_99, attention_dropout=0.1, hidden_dropout=0.0, **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
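
# Illustrative usage (assuming the class above): instantiating it with no arguments
# builds the default 2.7B-style configuration (2560 hidden units, 32 layers,
# 32 attention heads).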
| 717 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = list(snake_case__)
lowerCAmelCase_ : Tuple = list(snake_case__)
lowerCAmelCase_ : List[str] = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count += 1
lowerCAmelCase_ : Dict = "_"
if count > 1:
return False
else:
return "".join(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = []
while True:
lowerCAmelCase_ : Tuple = ["$"] * len(snake_case__)
lowerCAmelCase_ : Tuple = []
for i in range(len(snake_case__)):
for j in range(i + 1 , len(snake_case__)):
lowerCAmelCase_ : Optional[int] = compare_string(binary[i] , binary[j])
if k is False:
lowerCAmelCase_ : str = "*"
lowerCAmelCase_ : Tuple = "*"
temp.append("X")
for i in range(len(snake_case__)):
if checka[i] == "$":
pi.append(binary[i])
if len(snake_case__) == 0:
return pi
lowerCAmelCase_ : List[Any] = list(set(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = []
for minterm in minterms:
lowerCAmelCase_ : Dict = ""
for _ in range(snake_case__):
lowerCAmelCase_ : Dict = str(minterm % 2) + string
minterm //= 2
temp.append(snake_case__)
return temp
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = list(snake_case__)
lowerCAmelCase_ : Dict = list(snake_case__)
lowerCAmelCase_ : Dict = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = [0] * len(snake_case__)
for i in range(len(chart[0])):
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : int = -1
for j in range(len(snake_case__)):
if chart[j][i] == 1:
count += 1
lowerCAmelCase_ : Optional[int] = j
if count == 1:
lowerCAmelCase_ : Union[str, Any] = 1
for i in range(len(snake_case__)):
if select[i] == 1:
for j in range(len(chart[0])):
if chart[i][j] == 1:
for k in range(len(snake_case__)):
lowerCAmelCase_ : Tuple = 0
temp.append(prime_implicants[i])
while True:
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Dict = -1
lowerCAmelCase_ : Tuple = 0
for i in range(len(snake_case__)):
lowerCAmelCase_ : Dict = chart[i].count(1)
if count_n > max_n:
lowerCAmelCase_ : Optional[int] = count_n
lowerCAmelCase_ : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem])
for i in range(len(chart[0])):
if chart[rem][i] == 1:
for j in range(len(snake_case__)):
lowerCAmelCase_ : Any = 0
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : str = [[0 for x in range(len(snake_case__))] for x in range(len(snake_case__))]
for i in range(len(snake_case__)):
lowerCAmelCase_ : Optional[Any] = prime_implicants[i].count("_")
for j in range(len(snake_case__)):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__):
lowerCAmelCase_ : Dict = 1
return chart
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = int(input("Enter the no. of variables\n"))
lowerCAmelCase_ : Tuple = [
        int(x)
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
]
lowerCAmelCase_ : Any = decimal_to_binary(snake_case__ , snake_case__)
lowerCAmelCase_ : Dict = check(snake_case__)
print("Prime Implicants are:")
print(snake_case__)
lowerCAmelCase_ : int = prime_implicant_chart(snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = selection(snake_case__ , snake_case__)
print("Essential Prime Implicants are:")
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_lowercase = None
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
_lowercase = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
_lowercase = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __snake_case ( PreTrainedTokenizerFast ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = ["input_ids", "attention_mask"]
UpperCamelCase_ = MBartTokenizer
UpperCamelCase_ = []
UpperCamelCase_ = []
def __init__( self : int ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : List[Any]=None ,lowerCAmelCase__ : int="<s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : Tuple="</s>" ,lowerCAmelCase__ : str="<s>" ,lowerCAmelCase__ : List[Any]="<unk>" ,lowerCAmelCase__ : Dict="<pad>" ,lowerCAmelCase__ : List[str]="<mask>" ,lowerCAmelCase__ : Optional[int]=None ,lowerCAmelCase__ : int=None ,lowerCAmelCase__ : List[str]=None ,**lowerCAmelCase__ : str ,) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else mask_token
super().__init__(
vocab_file=lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,src_lang=lowerCamelCase__ ,tgt_lang=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
lowerCAmelCase_ : Tuple = vocab_file
lowerCAmelCase_ : str = False if not self.vocab_file else True
lowerCAmelCase_ : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCAmelCase_ : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCAmelCase_ : int = src_lang if src_lang is not None else "en_XX"
lowerCAmelCase_ : List[str] = self.convert_tokens_to_ids(self._src_lang )
lowerCAmelCase_ : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase_ ( self : str ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
lowerCAmelCase_ : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = [self.sep_token_id]
lowerCAmelCase_ : Optional[int] = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] ,lowerCAmelCase__ : Optional[str] ,**lowerCAmelCase__ : str ) -> Dict:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCAmelCase_ : List[str] = src_lang
lowerCAmelCase_ : Any = self(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ )
lowerCAmelCase_ : Any = self.convert_tokens_to_ids(lowerCamelCase__ )
lowerCAmelCase_ : List[str] = tgt_lang_id
return inputs
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : str = "en_XX" ,lowerCAmelCase__ : Optional[List[str]] = None ,lowerCAmelCase__ : str = "ro_RO" ,**lowerCAmelCase__ : List[Any] ,) -> BatchEncoding:
'''simple docstring'''
lowerCAmelCase_ : int = src_lang
lowerCAmelCase_ : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : int ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Tuple = self.convert_tokens_to_ids(lowerCamelCase__ )
lowerCAmelCase_ : str = []
lowerCAmelCase_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
lowerCAmelCase_ : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase_ : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase_ : List[str] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str ,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowerCamelCase__ )
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
lowerCAmelCase_ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase_ : List[str] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str ,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
lowerCAmelCase_ : Dict = os.path.join(
lowerCamelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file ,lowerCamelCase__ )
return (out_vocab_file,)
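# Usage sketch (illustrative; assumes the upstream MBartTokenizerFast that the
# class above mirrors):
#
#   tok = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   ids = tok("UN Chief says there is no military solution").input_ids
#   # per set_src_lang_special_tokens above, MBart adds no prefix tokens and the
#   # suffix [eos, src_lang_code], so ids ends with the eos id and the en_XX id.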
| 718 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
    lowerCAmelCase_ : List[Any] = bnb_quantization_config.load_in_8bit
    lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.load_in_4bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
    if isinstance(device_map , dict) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
    lowerCAmelCase_ : Optional[int] = load_in_4bit
    lowerCAmelCase_ : List[str] = load_in_8bit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
if param is not None:
param.to(torch.floataa)
        elif torch.is_floating_point(param):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
            snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map , str):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
    if isinstance(device_map , dict):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ):
lowerCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase_ : Optional[int] = []
current_key_name.append(snake_case__)
        if isinstance(module , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase_ : Optional[int] = ".".join(snake_case__)
lowerCAmelCase_ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase_ : List[Any] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    lowerCAmelCase_ : Tuple = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=snake_case__ , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    lowerCAmelCase_ : Dict = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False")
lowerCAmelCase_ : List[str] = module.weight.data
if module.bias is not None:
lowerCAmelCase_ : Any = module.bias.data
bnb_module.requires_grad_(snake_case__)
setattr(snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = True
if len(list(module.children())) > 0:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
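# Replacement sketch (illustrative): for a module registered under the dotted
# path "lm.decoder.fc", the recursion above rebuilds the path piece by piece in
# current_key_name, so modules_to_not_convert=["decoder"] keeps every Linear
# whose path contains "decoder" as a component in full precision.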
def UpperCamelCase ( snake_case__):
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase_ : List[Any] = deepcopy(snake_case__) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase_ : Dict = find_tied_parameters(snake_case__)
# For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict):
lowerCAmelCase_ : List[str] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
lowerCAmelCase_ : Optional[Any] = sum(snake_case__ , [])
lowerCAmelCase_ : List[Any] = len(snake_case__) > 0
# Check if it is a base model
lowerCAmelCase_ : List[str] = False
if hasattr(snake_case__ , "base_model_prefix"):
lowerCAmelCase_ : Tuple = not hasattr(snake_case__ , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase_ : Union[str, Any] = list(model.named_children())
lowerCAmelCase_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase_ : Any = set(snake_case__) - set(snake_case__)
lowerCAmelCase_ : Tuple = list(set(snake_case__)) + list(snake_case__)
# remove ".weight" from the keys
lowerCAmelCase_ : List[str] = [".weight", ".bias"]
lowerCAmelCase_ : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase_ : str = name.replace(snake_case__ , "")
filtered_module_names.append(snake_case__)
return filtered_module_names
def UpperCamelCase ( snake_case__):
for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit):
return True
return False
def UpperCamelCase ( snake_case__):
return next(parameter.parameters()).device
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(snake_case__ , snake_case__ , 0 , dtype=snake_case__ , value=snake_case__)
lowerCAmelCase_ : str = param_name
lowerCAmelCase_ : Tuple = model
if "." in tensor_name:
lowerCAmelCase_ : Dict = tensor_name.split(".")
for split in splits[:-1]:
lowerCAmelCase_ : Any = getattr(snake_case__ , snake_case__)
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''')
lowerCAmelCase_ : Union[str, Any] = new_module
lowerCAmelCase_ : Any = splits[-1]
# offload weights
lowerCAmelCase_ : List[Any] = False
offload_weight(module._parameters[tensor_name] , snake_case__ , snake_case__ , index=snake_case__)
if hasattr(module._parameters[tensor_name] , "SCB"):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__ , )
else:
offload_weight(snake_case__ , snake_case__ , snake_case__ , index=snake_case__)
offload_weight(snake_case__ , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__)
set_module_tensor_to_device(snake_case__ , snake_case__ , "meta" , dtype=snake_case__ , value=torch.empty(*param.size()))
| 683 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase = {
'''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
'''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''M2M100ForConditionalGeneration''',
'''M2M100Model''',
'''M2M100PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
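# Note (illustrative): with the _LazyModule registration above, importing this
# package stays cheap; the torch-dependent model classes listed in
# _import_structure are only materialized the first time an attribute such as
# M2M100ForConditionalGeneration is accessed.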
| 719 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __snake_case ( SequenceFeatureExtractor ):
"""simple docstring"""
UpperCamelCase_ = ['input_features', 'is_longer']
def __init__( self : Optional[int] ,lowerCAmelCase__ : List[Any]=64 ,lowerCAmelCase__ : Any=4_80_00 ,lowerCAmelCase__ : Optional[Any]=4_80 ,lowerCAmelCase__ : List[str]=10 ,lowerCAmelCase__ : List[Any]=10_24 ,lowerCAmelCase__ : Union[str, Any]=0.0 ,lowerCAmelCase__ : Tuple=False ,lowerCAmelCase__ : float = 0 ,lowerCAmelCase__ : float = 1_40_00 ,lowerCAmelCase__ : int = None ,lowerCAmelCase__ : str = "fusion" ,lowerCAmelCase__ : str = "repeatpad" ,**lowerCAmelCase__ : Union[str, Any] ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Optional[Any] = top_db
lowerCAmelCase_ : str = truncation
lowerCAmelCase_ : Tuple = padding
lowerCAmelCase_ : str = fft_window_size
lowerCAmelCase_ : Dict = (fft_window_size >> 1) + 1
lowerCAmelCase_ : Dict = hop_length
lowerCAmelCase_ : Any = max_length_s
lowerCAmelCase_ : int = max_length_s * sampling_rate
lowerCAmelCase_ : Optional[int] = sampling_rate
lowerCAmelCase_ : int = frequency_min
lowerCAmelCase_ : Optional[Any] = frequency_max
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm=lowerCAmelCase__ ,mel_scale="htk" ,)
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm="slaney" ,mel_scale="slaney" ,)
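    # Worked numbers (illustrative): with the defaults above
    # (fft_window_size=10_24, hop_length=4_80, max_length_s=10,
    # sampling_rate=4_80_00), nb_frequency_bins = (1024 >> 1) + 1 = 513 and
    # nb_max_samples = 10 * 48_000 = 480_000, so a full-length clip spans
    # 480_000 // 480 + 1 = 1001 mel frames in the fusion path below.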
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = spectrogram(
lowerCAmelCase__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCAmelCase__ ,log_mel="dB" ,)
return log_mel_spectrogram.T
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
# randomly choose index for each part
lowerCAmelCase_ : str = np.random.choice(ranges[0] )
lowerCAmelCase_ : Optional[Any] = np.random.choice(ranges[1] )
lowerCAmelCase_ : Any = np.random.choice(ranges[2] )
lowerCAmelCase_ : str = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase_ : List[str] = torch.tensor(mel[None, None, :] )
lowerCAmelCase_ : List[Any] = torch.nn.functional.interpolate(
lowerCAmelCase__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy()
lowerCAmelCase_ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase_ : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase_ : str = len(lowerCAmelCase__ ) - max_length
lowerCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 )
lowerCAmelCase_ : Dict = waveform[idx : idx + max_length]
lowerCAmelCase_ : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : str = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase_ : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase_ : Dict = np.stack([mel, mel, mel, mel] ,axis=0 )
lowerCAmelCase_ : int = False
else:
lowerCAmelCase_ : str = self._random_mel_fusion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase_ : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase_ : List[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : int = np.stack(np.tile(lowerCAmelCase__ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase_ : Optional[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = np.stack(np.tile(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = np.pad(lowerCAmelCase__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 )
if truncation == "fusion":
lowerCAmelCase_ : int = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
lowerCAmelCase_ : str = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : str = None ,lowerCAmelCase__ : Optional[str] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCAmelCase__ : List[Any] ,) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ : List[str] = truncation if truncation is not None else self.truncation
lowerCAmelCase_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : Dict = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : Dict = is_batched_numpy or (
isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : List[str] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCAmelCase_ : Tuple = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : Any = [np.asarray(lowerCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase_ : Optional[Any] = [
self._get_input_mel(lowerCAmelCase__ ,max_length if max_length else self.nb_max_samples ,lowerCAmelCase__ ,lowerCAmelCase__ )
for waveform in raw_speech
]
lowerCAmelCase_ : str = []
lowerCAmelCase_ : str = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase__ )
is_longer.append(lowerCAmelCase__ )
if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase_ : Any = np.random.randint(0 ,len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Dict = True
        if isinstance(input_mel[0] ,list ):
lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase_ : List[Any] = [[longer] for longer in is_longer]
lowerCAmelCase_ : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
lowerCAmelCase_ : Dict = BatchFeature(lowerCAmelCase__ )
if return_tensors is not None:
lowerCAmelCase_ : List[str] = input_features.convert_to_tensors(lowerCAmelCase__ )
return input_features
| 683 | 0 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_lowercase : Dict = logging.get_logger(__name__)
class __snake_case ( SequenceFeatureExtractor ):
"""simple docstring"""
UpperCamelCase_ = ['''input_features''', '''attention_mask''']
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Optional[Any]=80 ,lowerCAmelCase__ : Any=1_60_00 ,lowerCAmelCase__ : List[str]=0.0 ,lowerCAmelCase__ : Any=10 ,lowerCAmelCase__ : List[Any]=25 ,lowerCAmelCase__ : Union[str, Any]="hamming_window" ,lowerCAmelCase__ : Optional[int]=3_27_68.0 ,lowerCAmelCase__ : Optional[Any]=0.97 ,lowerCAmelCase__ : List[str]=1.0 ,lowerCAmelCase__ : Optional[int]=True ,lowerCAmelCase__ : Union[str, Any]=True ,lowerCAmelCase__ : Tuple=False ,**lowerCAmelCase__ : Tuple ,) -> Optional[int]:
'''simple docstring'''
super().__init__(feature_size=__a ,sampling_rate=__a ,padding_value=__a ,**__a )
lowerCAmelCase_ : Union[str, Any] = feature_size
lowerCAmelCase_ : Dict = sampling_rate
lowerCAmelCase_ : int = padding_value
lowerCAmelCase_ : Tuple = hop_length
lowerCAmelCase_ : List[Any] = win_length
lowerCAmelCase_ : Dict = frame_signal_scale
lowerCAmelCase_ : Union[str, Any] = preemphasis_coeff
lowerCAmelCase_ : Tuple = mel_floor
lowerCAmelCase_ : Union[str, Any] = normalize_means
lowerCAmelCase_ : Tuple = normalize_vars
lowerCAmelCase_ : Dict = win_function
lowerCAmelCase_ : Dict = return_attention_mask
lowerCAmelCase_ : Any = win_length * sampling_rate // 10_00
lowerCAmelCase_ : Any = hop_length * sampling_rate // 10_00
lowerCAmelCase_ : Dict = optimal_fft_length(self.sample_size )
lowerCAmelCase_ : Any = (self.n_fft // 2) + 1
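    # Worked numbers (illustrative): with the defaults above
    # (sampling_rate=1_60_00, win_length=25 ms, hop_length=10 ms),
    # sample_size = 25 * 16_000 // 1_000 = 400 samples, sample_stride = 160,
    # n_fft = optimal_fft_length(400) = 512 (the next power of two), and
    # n_freqs = 512 // 2 + 1 = 257 frequency bins.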
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : np.array ) -> np.ndarray:
'''simple docstring'''
if self.win_function == "hamming_window":
lowerCAmelCase_ : str = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=__a )
else:
lowerCAmelCase_ : Union[str, Any] = window_function(window_length=self.sample_size ,name=self.win_function )
lowerCAmelCase_ : List[str] = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
lowerCAmelCase_ : Dict = spectrogram(
one_waveform * self.frame_signal_scale ,window=__a ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=__a ,preemphasis=self.preemphasis_coeff ,mel_filters=__a ,mel_floor=self.mel_floor ,log_mel="log" ,)
return msfc_features.T
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : List[Any] ) -> List[str]:
'''simple docstring'''
if self.normalize_means:
lowerCAmelCase_ : Optional[int] = x[:input_length].mean(axis=0 )
lowerCAmelCase_ : Union[str, Any] = np.subtract(__a ,__a )
if self.normalize_vars:
lowerCAmelCase_ : int = x[:input_length].std(axis=0 )
lowerCAmelCase_ : List[str] = np.divide(__a ,__a )
if input_length < x.shape[0]:
lowerCAmelCase_ : List[Any] = padding_value
# make sure array is in float32
lowerCAmelCase_ : Optional[Any] = x.astype(np.floataa )
return x
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : List[np.ndarray] ,lowerCAmelCase__ : Optional[np.ndarray] = None ) -> List[np.ndarray]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__a ,__a ,self.padding_value ) for x, n in zip(__a ,__a )]
def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[bool] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,lowerCAmelCase__ : Optional[int] = None ,**lowerCAmelCase__ : Any ,) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : str = isinstance(__a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : int = is_batched_numpy or (
isinstance(__a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : str = [np.asarray(__a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__a ,np.ndarray ):
lowerCAmelCase_ : str = np.asarray(__a ,dtype=np.floataa )
elif isinstance(__a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : List[str] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : str = [raw_speech]
# extract fbank features
lowerCAmelCase_ : Optional[int] = [self._extract_mfsc_features(__a ) for one_waveform in raw_speech]
# convert into correct format for padding
lowerCAmelCase_ : Dict = BatchFeature({"input_features": features} )
lowerCAmelCase_ : Dict = self.pad(
__a ,padding=__a ,max_length=__a ,truncation=__a ,pad_to_multiple_of=__a ,return_attention_mask=__a ,**__a ,)
# make sure list is in array format
lowerCAmelCase_ : int = padded_inputs.get("input_features" )
        if isinstance(input_features[0] ,list ):
lowerCAmelCase_ : Optional[Any] = [np.asarray(__a ,dtype=np.floataa ) for feature in input_features]
lowerCAmelCase_ : Optional[int] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
lowerCAmelCase_ : Optional[Any] = [np.asarray(__a ,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowerCAmelCase_ : Tuple = (
np.array(__a ,dtype=np.intaa )
if self._get_padding_strategies(__a ,max_length=__a ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowerCAmelCase_ : Union[str, Any] = self.normalize(
padded_inputs["input_features"] ,attention_mask=__a )
if return_tensors is not None:
lowerCAmelCase_ : Optional[int] = padded_inputs.convert_to_tensors(__a )
return padded_inputs
| 720 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_lowercase = Lock()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__)
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCAmelCase_ : Optional[Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCAmelCase_ : Any = min(snake_case__ , snake_case__)
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__)
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCAmelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCAmelCase_ : Dict = max(snake_case__ , snake_case__)
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : int = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe())
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCAmelCase_ : Tuple = Pipe()
lowerCAmelCase_ : Optional[int] = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ))
lowerCAmelCase_ : int = temp_rs
lowerCAmelCase_ : List[Any] = temp_rr
for i in range(1 , len(snake_case__) - 1):
lowerCAmelCase_ : Dict = Pipe()
lowerCAmelCase_ : List[str] = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ))
lowerCAmelCase_ : Dict = temp_rs
lowerCAmelCase_ : Optional[Any] = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__) - 1,
arr[len(snake_case__) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__) - 1],
) , ))
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__)):
lowerCAmelCase_ : Union[str, Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
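# For contrast, a minimal single-process sketch of the same algorithm
# (illustrative addition; the pipe-based version above parallelizes exactly
# these compare-and-swap passes):
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for pass_idx in range(n):
        # even passes compare pairs (0,1), (2,3), ...; odd passes (1,2), (3,4), ...
        for i in range(pass_idx % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr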
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = list(range(10 , 0 , -1))
print("Initial List")
print(*snake_case__)
lowerCAmelCase_ : Tuple = odd_even_transposition(snake_case__)
print("Sorted List\n")
print(*snake_case__)
if __name__ == "__main__":
main()
| 683 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __snake_case :
"""simple docstring"""
def __init__( self : Tuple ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any]=13 ,lowerCAmelCase__ : List[str]=30 ,lowerCAmelCase__ : Dict=2 ,lowerCAmelCase__ : int=3 ,lowerCAmelCase__ : List[str]=True ,lowerCAmelCase__ : List[Any]=True ,lowerCAmelCase__ : Tuple=32 ,lowerCAmelCase__ : Tuple=2 ,lowerCAmelCase__ : Optional[Any]=4 ,lowerCAmelCase__ : Tuple=37 ,lowerCAmelCase__ : Optional[int]="gelu" ,lowerCAmelCase__ : Tuple=0.1 ,lowerCAmelCase__ : Optional[int]=0.1 ,lowerCAmelCase__ : Optional[int]=10 ,lowerCAmelCase__ : int=0.02 ,lowerCAmelCase__ : int=3 ,lowerCAmelCase__ : str=None ,) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : int = batch_size
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Tuple = patch_size
lowerCAmelCase_ : List[Any] = num_channels
lowerCAmelCase_ : Tuple = is_training
lowerCAmelCase_ : Optional[Any] = use_labels
lowerCAmelCase_ : List[Any] = hidden_size
lowerCAmelCase_ : Dict = num_hidden_layers
lowerCAmelCase_ : Dict = num_attention_heads
lowerCAmelCase_ : Dict = intermediate_size
lowerCAmelCase_ : str = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : Any = type_sequence_label_size
lowerCAmelCase_ : List[str] = initializer_range
lowerCAmelCase_ : List[str] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase_ : int = (image_size // patch_size) ** 2
lowerCAmelCase_ : Dict = num_patches + 1
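        # Worked numbers (illustrative): with the defaults above (image_size=30,
        # patch_size=2), the encoder sees (30 // 2) ** 2 = 225 patches plus the
        # [CLS] token, i.e. seq_length = 226.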
def UpperCAmelCase_ ( self : List[Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : int = None
if self.use_labels:
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : Any ) -> Any:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCAmelCase__ ,initializer_range=self.initializer_range ,)
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = TFViTModel(config=UpperCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = model(UpperCAmelCase__ ,training=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
lowerCAmelCase_ : Optional[Any] = self.image_size // 2
lowerCAmelCase_ : Optional[int] = pixel_values[:, :, :image_size, :image_size]
lowerCAmelCase_ : List[str] = model(UpperCAmelCase__ ,interpolate_pos_encoding=UpperCAmelCase__ ,training=UpperCAmelCase__ )
lowerCAmelCase_ : Dict = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : int ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
lowerCAmelCase_ : Optional[int] = TFViTForImageClassification(UpperCAmelCase__ )
lowerCAmelCase_ : Optional[int] = model(UpperCAmelCase__ ,labels=UpperCAmelCase__ ,training=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
lowerCAmelCase_ : Dict = self.image_size // 2
lowerCAmelCase_ : int = pixel_values[:, :, :image_size, :image_size]
lowerCAmelCase_ : Tuple = model(UpperCAmelCase__ ,interpolate_pos_encoding=UpperCAmelCase__ ,training=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase_ : Dict = 1
lowerCAmelCase_ : Any = TFViTForImageClassification(UpperCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ : str = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = config_and_inputs
lowerCAmelCase_ : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase_ = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : Dict ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[str] = TFViTModelTester(self )
lowerCAmelCase_ : Any = ConfigTester(self ,config_class=UpperCAmelCase__ ,has_text_modality=UpperCAmelCase__ ,hidden_size=37 )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : int ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
lowerCAmelCase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ ,tf.keras.layers.Layer ) )
def UpperCAmelCase_ ( self : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[Any] = model_class(UpperCAmelCase__ )
lowerCAmelCase_ : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase_ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : str ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(UpperCAmelCase__ )
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : str = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
lowerCAmelCase_ : Dict = self.default_image_processor
lowerCAmelCase_ : List[Any] = prepare_img()
lowerCAmelCase_ : int = image_processor(images=UpperCAmelCase__ ,return_tensors="tf" )
# forward pass
lowerCAmelCase_ : List[str] = model(**UpperCAmelCase__ )
# verify the logits
lowerCAmelCase_ : int = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,UpperCAmelCase__ )
lowerCAmelCase_ : List[Any] = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3] ,UpperCAmelCase__ ,atol=1e-4 )
| 721 |
from typing import Any
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validation(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# Creates data structures and fill initial step
lowerCAmelCase_ : dict = {}
lowerCAmelCase_ : dict = {}
for state in states_space:
lowerCAmelCase_ : List[Any] = observations_space[0]
lowerCAmelCase_ : int = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCAmelCase_ : Dict = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case__)):
lowerCAmelCase_ : List[Any] = observations_space[o]
lowerCAmelCase_ : Optional[Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCAmelCase_ : List[Any] = ""
lowerCAmelCase_ : Tuple = -1
for k_state in states_space:
lowerCAmelCase_ : int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Optional[Any] = k_state
# Update probabilities and pointers dicts
lowerCAmelCase_ : Union[str, Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCAmelCase_ : Any = arg_max
# The final observation
lowerCAmelCase_ : List[Any] = observations_space[len(snake_case__) - 1]
# argmax for given final observation
lowerCAmelCase_ : List[str] = ""
lowerCAmelCase_ : List[str] = -1
for k_state in states_space:
lowerCAmelCase_ : List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Tuple = k_state
lowerCAmelCase_ : str = arg_max
# Process pointers backwards
lowerCAmelCase_ : int = last_state
lowerCAmelCase_ : int = []
for o in range(len(snake_case__) - 1 , -1 , -1):
result.append(snake_case__)
lowerCAmelCase_ : Optional[Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validate_not_empty(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
_validate_lists(snake_case__ , snake_case__)
_validate_dicts(
snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]):
raise ValueError("There's an empty parameter")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_list(snake_case__ , "observations_space")
_validate_list(snake_case__ , "states_space")
def UpperCamelCase ( snake_case__ , snake_case__):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list'''
raise ValueError(snake_case__)
else:
for x in _object:
if not isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , ):
_validate_dict(snake_case__ , "initial_probabilities" , snake_case__)
_validate_nested_dict(snake_case__ , "transition_probabilities")
_validate_nested_dict(snake_case__ , "emission_probabilities")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_dict(_object , snake_case__ , snake_case__)
for x in _object.values():
_validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : List[str] = F'''{var_name} must be a dict'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object):
lowerCAmelCase_ : Dict = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object.values()):
lowerCAmelCase_ : Union[str, Any] = "nested dictionary " if nested else ""
lowerCAmelCase_ : Any = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case__)
if __name__ == "__main__":
from doctest import testmod
testmod()
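# Worked example (as comments, since the public name of the Viterbi routine
# above is obfuscated): with the classic two-state weather HMM — states
# {"Rainy", "Sunny"}, initial P(Rainy)=0.6 / P(Sunny)=0.4, transitions
# Rainy->Rainy 0.7 / Sunny->Sunny 0.6, and emissions Rainy: walk 0.1,
# shop 0.4, clean 0.5; Sunny: walk 0.6, shop 0.3, clean 0.1 — the most
# likely state path for the observations ["walk", "shop", "clean"] is
# ["Sunny", "Rainy", "Rainy"].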
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''OwlViTFeatureExtractor''']
_lowercase = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
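# Typical consumer-side import that this lazy module enables (a sketch; it
# assumes `transformers` is installed with the torch and vision extras and
# downloads the checkpoint on first use, with `image` a PIL image):
#
# from transformers import OwlViTProcessor, OwlViTForObjectDetection
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
# outputs = model(**inputs)  # logits + boxes for open-vocabulary detection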
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __snake_case ( PipelineTool ):
"""simple docstring"""
UpperCamelCase_ = 'microsoft/speecht5_tts'
UpperCamelCase_ = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
UpperCamelCase_ = 'text_reader'
UpperCamelCase_ = SpeechTaProcessor
UpperCamelCase_ = SpeechTaForTextToSpeech
UpperCamelCase_ = SpeechTaHifiGan
UpperCamelCase_ = ['text']
UpperCamelCase_ = ['audio']
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
if self.post_processor is None:
lowerCAmelCase_ : Any = "microsoft/speecht5_hifigan"
super().setup()
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Optional[int]=None ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Any = self.pre_processor(text=lowerCAmelCase__ ,return_tensors="pt" ,truncation=lowerCAmelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
lowerCAmelCase_ : str = load_dataset("Matthijs/cmu-arctic-xvectors" ,split="validation" )
lowerCAmelCase_ : List[Any] = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : str ) -> Any:
'''simple docstring'''
with torch.no_grad():
return self.post_processor(lowerCAmelCase__ ).cpu().detach()
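# A minimal sketch of driving the tool directly (the upstream class is named
# `TextToSpeechTool`; the name above is obfuscated). First use downloads
# three checkpoints plus the speaker-embedding dataset:
#
# tool = TextToSpeechTool()
# waveform = tool("Hello, how are you?")  # PipelineTool instances are callable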
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( SchedulerCommonTest ):
"""simple docstring"""
UpperCamelCase_ = (KDPMaDiscreteScheduler,)
UpperCamelCase_ = 1_0
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = {
"num_train_timesteps": 11_00,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowerCAmelCase__ )
return config
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] ,[0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase__ ,beta_end=lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.scheduler_classes[0]
lowerCAmelCase_ : Optional[int] = self.get_scheduler_config(prediction_type="v_prediction" )
lowerCAmelCase_ : Any = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase_ : Tuple = self.dummy_model()
lowerCAmelCase_ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase_ : Union[str, Any] = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : Any = scheduler.scale_model_input(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = model(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : str = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = output.prev_sample
lowerCAmelCase_ : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_002 ) < 1e-3
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if torch_device == "mps":
return
lowerCAmelCase_ : List[str] = self.scheduler_classes[0]
lowerCAmelCase_ : Any = self.get_scheduler_config()
lowerCAmelCase_ : Any = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase_ : int = self.dummy_model()
lowerCAmelCase_ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase_ : List[str] = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : Any = scheduler.scale_model_input(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = model(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = output.prev_sample
lowerCAmelCase_ : Any = torch.sum(torch.abs(lowerCAmelCase__ ) )
lowerCAmelCase_ : Optional[int] = torch.mean(torch.abs(lowerCAmelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if torch_device == "mps":
return
lowerCAmelCase_ : int = self.scheduler_classes[0]
lowerCAmelCase_ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase_ : Any = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps ,device=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = self.dummy_model()
lowerCAmelCase_ : List[Any] = self.dummy_sample_deter.to(lowerCAmelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase_ : List[str] = scheduler.scale_model_input(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = model(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : int = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = output.prev_sample
lowerCAmelCase_ : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) )
lowerCAmelCase_ : Union[str, Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
if str(lowerCAmelCase__ ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
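# A minimal sketch of the denoising loop these tests exercise (the upstream
# diffusers class is `KDPM2DiscreteScheduler`; the digit is mangled in the
# import above). A random tensor stands in for a real UNet prediction:
#
# import torch
# from diffusers import KDPM2DiscreteScheduler
#
# scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100)
# scheduler.set_timesteps(10)
# sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = torch.randn_like(model_input)  # stand-in for model(input, t)
#     sample = scheduler.step(noise_pred, t, sample).prev_sample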
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_lowercase = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
_lowercase = None
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file.")
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions.")
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout).")
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer.")
parser.add_argument(
"--na-prob-thresh" , "-t" , type=snake_case__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=snake_case__ , help="Save precision-recall curves to directory.")
parser.add_argument("--verbose" , "-v" , action="store_true")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : str = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : Dict = bool(qa["answers"]["text"])
return qid_to_has_ans
def UpperCamelCase ( snake_case__):
def remove_articles(snake_case__):
return ARTICLES_REGEX.sub(" " , snake_case__)
def white_space_fix(snake_case__):
return " ".join(text.split())
def remove_punc(snake_case__):
lowerCAmelCase_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(snake_case__):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case__))))
def UpperCamelCase ( snake_case__):
if not s:
return []
return normalize_answer(snake_case__).split()
def UpperCamelCase ( snake_case__ , snake_case__):
return int(normalize_answer(snake_case__) == normalize_answer(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = get_tokens(snake_case__)
lowerCAmelCase_ : Union[str, Any] = get_tokens(snake_case__)
lowerCAmelCase_ : Any = collections.Counter(snake_case__) & collections.Counter(snake_case__)
lowerCAmelCase_ : Dict = sum(common.values())
if len(snake_case__) == 0 or len(snake_case__) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
lowerCAmelCase_ : List[Any] = 1.0 * num_same / len(snake_case__)
lowerCAmelCase_ : int = 1.0 * num_same / len(snake_case__)
lowerCAmelCase_ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
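# Worked example: gold "in the city" vs. prediction "city". Normalization
# strips the article "the", leaving gold tokens [in, city] and prediction
# tokens [city]; the one shared token gives precision 1/1, recall 1/2, and
# F1 = 2 * 1.0 * 0.5 / 1.5 ≈ 0.67.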
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Tuple = {}
lowerCAmelCase_ : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : int = qa["id"]
lowerCAmelCase_ : Any = [t for t in qa["answers"]["text"] if normalize_answer(snake_case__)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowerCAmelCase_ : Any = [""]
if qid not in preds:
print(F'''Missing prediction for {qid}''')
continue
lowerCAmelCase_ : Tuple = preds[qid]
# Take max over all gold answers
lowerCAmelCase_ : Any = max(compute_exact(snake_case__ , snake_case__) for a in gold_answers)
lowerCAmelCase_ : Optional[Any] = max(compute_fa(snake_case__ , snake_case__) for a in gold_answers)
return exact_scores, fa_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = {}
for qid, s in scores.items():
lowerCAmelCase_ : List[Any] = na_probs[qid] > na_prob_thresh
if pred_na:
lowerCAmelCase_ : List[str] = float(not qid_to_has_ans[qid])
else:
lowerCAmelCase_ : Union[str, Any] = s
return new_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None):
if not qid_list:
lowerCAmelCase_ : Any = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(fa_scores.values()) / total),
("total", total),
])
else:
lowerCAmelCase_ : Tuple = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
("total", total),
])
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
for k in new_eval:
lowerCAmelCase_ : Union[str, Any] = new_eval[k]
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
plt.step(snake_case__ , snake_case__ , color="b" , alpha=0.2 , where="post")
plt.fill_between(snake_case__ , snake_case__ , step="post" , alpha=0.2 , color="b")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(snake_case__)
plt.savefig(snake_case__)
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
lowerCAmelCase_ : List[Any] = sorted(snake_case__ , key=lambda snake_case__: na_probs[k])
lowerCAmelCase_ : Dict = 0.0
lowerCAmelCase_ : int = 1.0
lowerCAmelCase_ : List[str] = 0.0
lowerCAmelCase_ : Tuple = [1.0]
lowerCAmelCase_ : Tuple = [0.0]
lowerCAmelCase_ : Dict = 0.0
for i, qid in enumerate(snake_case__):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCAmelCase_ : str = true_pos / float(i + 1)
lowerCAmelCase_ : Union[str, Any] = true_pos / float(snake_case__)
if i == len(snake_case__) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(snake_case__)
recalls.append(snake_case__)
if out_image:
plot_pr_curve(snake_case__ , snake_case__ , snake_case__ , snake_case__)
return {"ap": 100.0 * avg_prec}
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
if out_image_dir and not os.path.exists(snake_case__):
os.makedirs(snake_case__)
lowerCAmelCase_ : Any = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
lowerCAmelCase_ : Any = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_exact.png") , title="Precision-Recall curve for Exact Match score" , )
lowerCAmelCase_ : Dict = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_f1.png") , title="Precision-Recall curve for F1 score" , )
lowerCAmelCase_ : Dict = {k: float(snake_case__) for k, v in qid_to_has_ans.items()}
lowerCAmelCase_ : str = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_oracle.png") , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(snake_case__ , snake_case__ , "pr_exact")
merge_eval(snake_case__ , snake_case__ , "pr_f1")
merge_eval(snake_case__ , snake_case__ , "pr_oracle")
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
if not qid_list:
return
lowerCAmelCase_ : Optional[Any] = [na_probs[k] for k in qid_list]
lowerCAmelCase_ : Dict = np.ones_like(snake_case__) / float(len(snake_case__))
plt.hist(snake_case__ , weights=snake_case__ , bins=20 , range=(0.0, 1.0))
plt.xlabel("Model probability of no-answer")
plt.ylabel("Proportion of dataset")
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(snake_case__ , F'''na_prob_hist_{name}.png'''))
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
lowerCAmelCase_ : str = num_no_ans
lowerCAmelCase_ : List[str] = cur_score
lowerCAmelCase_ : List[Any] = 0.0
lowerCAmelCase_ : str = sorted(snake_case__ , key=lambda snake_case__: na_probs[k])
for i, qid in enumerate(snake_case__):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCAmelCase_ : Union[str, Any] = scores[qid]
else:
if preds[qid]:
lowerCAmelCase_ : List[Any] = -1
else:
lowerCAmelCase_ : List[str] = 0
cur_score += diff
if cur_score > best_score:
lowerCAmelCase_ : Optional[Any] = cur_score
lowerCAmelCase_ : Optional[int] = na_probs[qid]
return 100.0 * best_score / len(snake_case__), best_thresh
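# How the sweep above works: it starts from the score of predicting
# no-answer for every question (num_no_ans exact matches), then walks the
# questions in order of increasing no-answer probability, pretending each
# one in turn is answered; answerable questions add their score, while a
# non-empty prediction on an unanswerable one costs a point. The running
# maximum fixes the best threshold.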
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Dict = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = best_exact
lowerCAmelCase_ : List[str] = exact_thresh
lowerCAmelCase_ : Any = best_fa
lowerCAmelCase_ : List[str] = fa_thresh
def UpperCamelCase ( ):
with open(OPTS.data_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
lowerCAmelCase_ : List[Any] = dataset_json["data"]
with open(OPTS.pred_file) as f:
lowerCAmelCase_ : int = json.load(snake_case__)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
else:
lowerCAmelCase_ : List[Any] = {k: 0.0 for k in preds}
lowerCAmelCase_ : Tuple = make_qid_to_has_ans(snake_case__) # maps qid to True/False
lowerCAmelCase_ : Any = [k for k, v in qid_to_has_ans.items() if v]
lowerCAmelCase_ : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
lowerCAmelCase_ , lowerCAmelCase_ : Dict = get_raw_scores(snake_case__ , snake_case__)
lowerCAmelCase_ : str = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Dict = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__)
if has_ans_qids:
lowerCAmelCase_ : str = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "HasAns")
if no_ans_qids:
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "NoAns")
if OPTS.na_prob_file:
find_all_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , OPTS.out_image_dir)
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "hasAns")
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "noAns")
if OPTS.out_file:
with open(OPTS.out_file , "w") as f:
json.dump(snake_case__ , snake_case__)
else:
print(json.dumps(snake_case__ , indent=2))
if __name__ == "__main__":
_lowercase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
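# Example invocation (file names are placeholders for your own SQuAD v2.0
# data and predictions):
#
#   python evaluate.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --na-prob-thresh 0.5 -o eval.json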
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __snake_case ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ShapEImgaImgPipeline
UpperCamelCase_ = ['image']
UpperCamelCase_ = ['image']
UpperCamelCase_ = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
UpperCamelCase_ = False
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
return 32
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return 32
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return 8
@property
def UpperCAmelCase_ ( self : int ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
lowerCAmelCase_ : Any = CLIPVisionModel(__UpperCamelCase )
return model
@property
def UpperCAmelCase_ ( self : List[str] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Tuple = CLIPImageProcessor(
crop_size=2_24 ,do_center_crop=__UpperCamelCase ,do_normalize=__UpperCamelCase ,do_resize=__UpperCamelCase ,image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,resample=3 ,size=2_24 ,)
return image_processor
@property
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : int = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
lowerCAmelCase_ : Dict = PriorTransformer(**__UpperCamelCase )
return model
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : List[Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
lowerCAmelCase_ : Optional[Any] = ShapERenderer(**__UpperCamelCase )
return model
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.dummy_prior
lowerCAmelCase_ : List[str] = self.dummy_image_encoder
lowerCAmelCase_ : Optional[int] = self.dummy_image_processor
lowerCAmelCase_ : Optional[Any] = self.dummy_renderer
lowerCAmelCase_ : Any = HeunDiscreteScheduler(
beta_schedule="exp" ,num_train_timesteps=10_24 ,prediction_type="sample" ,use_karras_sigmas=__UpperCamelCase ,clip_sample=__UpperCamelCase ,clip_sample_range=1.0 ,)
lowerCAmelCase_ : Optional[int] = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[int]=0 ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith("mps" ):
lowerCAmelCase_ : str = torch.manual_seed(__UpperCamelCase )
else:
lowerCAmelCase_ : Optional[Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = "cpu"
lowerCAmelCase_ : int = self.get_dummy_components()
lowerCAmelCase_ : int = self.pipeline_class(**__UpperCamelCase )
lowerCAmelCase_ : Dict = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
lowerCAmelCase_ : List[Any] = output.images[0]
lowerCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCAmelCase_ : Dict = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = torch_device == "cpu"
lowerCAmelCase_ : List[str] = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=__UpperCamelCase ,relax_max_difference=__UpperCamelCase ,)
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.get_dummy_components()
lowerCAmelCase_ : Tuple = self.pipeline_class(**__UpperCamelCase )
lowerCAmelCase_ : Dict = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCAmelCase_ : Dict = 1
lowerCAmelCase_ : int = 2
lowerCAmelCase_ : int = self.get_dummy_inputs(__UpperCamelCase )
for key in inputs.keys():
if key in self.batch_params:
lowerCAmelCase_ : Tuple = batch_size * [inputs[key]]
lowerCAmelCase_ : Tuple = pipe(**__UpperCamelCase ,num_images_per_prompt=__UpperCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
lowerCAmelCase_ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy" )
lowerCAmelCase_ : Optional[Any] = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
lowerCAmelCase_ : List[Any] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = pipe(
__UpperCamelCase ,generator=__UpperCamelCase ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type="np" ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__UpperCamelCase ,__UpperCamelCase )
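# End-to-end usage outside the test harness (heavy: it downloads the
# checkpoint and, per the decorators above, expects a CUDA GPU; the
# upstream class spelling is `ShapEImg2ImgPipeline`, with `image` a PIL
# image as in the slow test):
#
# from diffusers import ShapEImg2ImgPipeline
# pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
# frames = pipe(image, guidance_scale=3.0, num_inference_steps=64,
#               frame_size=64, output_type="np").images[0]  # (20, 64, 64, 3)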
from math import sqrt
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[int] = 0
for i in range(1 , int(sqrt(snake_case__) + 1)):
if n % i == 0 and i != sqrt(snake_case__):
total += i + n // i
elif i == sqrt(snake_case__):
total += i
return total - n
def UpperCamelCase ( snake_case__ = 1_00_00):
lowerCAmelCase_ : int = sum(
i
for i in range(1 , snake_case__)
if sum_of_divisors(sum_of_divisors(snake_case__)) == i and sum_of_divisors(snake_case__) != i)
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
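# Worked example (a hand check of sum_of_divisors above): the proper
# divisors of 220 sum to 1+2+4+5+10+11+20+22+44+55+110 = 284, and those of
# 284 sum to 1+2+4+71+142 = 220, so 220 and 284 form an amicable pair and
# both are counted. Perfect numbers such as 6 (1+2+3 = 6) are excluded by
# the `sum_of_divisors(i) != i` guard. The known total below 10000 is 31626.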
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
_lowercase = 50003
_lowercase = 50002
@require_sentencepiece
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = PLBartTokenizer
UpperCamelCase_ = None
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : Any ) -> str:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ : Dict = PLBartTokenizer(lowercase__ ,language_codes="base" ,keep_accents=lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = PLBartTokenizer(lowercase__ ,language_codes="base" ,keep_accents=lowercase__ )
lowerCAmelCase_ : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase__ ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
lowerCAmelCase_ : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
lowerCAmelCase_ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
lowerCAmelCase_ : Any = tokenizer.vocab_size
lowerCAmelCase_ : int = [tokenizer.convert_ids_to_tokens(lowercase__ ) for x in range(end - 4 ,lowercase__ )]
self.assertListEqual(lowercase__ ,["__java__", "__python__", "__en_XX__", "<mask>"] )
lowerCAmelCase_ : Tuple = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowerCAmelCase_ : Dict = tokenizer(lowercase__ ).input_ids
self.assertEqual(
tokenizer.decode(lowercase__ ,skip_special_tokens=lowercase__ ,clean_up_tokenization_spaces=lowercase__ ) ,lowercase__ ,)
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = PLBartTokenizer(lowercase__ ,language_codes="multi" ,keep_accents=lowercase__ )
lowerCAmelCase_ : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase__ ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
lowerCAmelCase_ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
lowerCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
lowerCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
lowerCAmelCase_ : List[Any] = tokenizer.vocab_size
lowerCAmelCase_ : Any = [tokenizer.convert_ids_to_tokens(lowercase__ ) for x in range(end - 7 ,lowercase__ )]
self.assertListEqual(
lowercase__ ,["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
lowerCAmelCase_ : Optional[int] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowerCAmelCase_ : str = tokenizer(lowercase__ ).input_ids
self.assertEqual(
tokenizer.decode(lowercase__ ,skip_special_tokens=lowercase__ ,clean_up_tokenization_spaces=lowercase__ ) ,lowercase__ ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = """uclanlp/plbart-python-en_XX"""
UpperCamelCase_ = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
UpperCamelCase_ = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
UpperCamelCase_ = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def UpperCAmelCase_ ( cls : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name ,language_codes="base" ,src_lang="python" ,tgt_lang="en_XX" )
lowerCAmelCase_ : Optional[Any] = 1
return cls
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] ,5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] ,5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] ,5_00_03 )
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,lowercase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.assertIn(lowercase__ ,self.tokenizer.all_special_ids )
lowerCAmelCase_ : Optional[Any] = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
lowerCAmelCase_ : Any = self.tokenizer.decode(lowercase__ ,skip_special_tokens=lowercase__ )
lowerCAmelCase_ : Tuple = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.assertNotIn(self.tokenizer.eos_token ,lowercase__ )
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] ,lowercase__ )
lowerCAmelCase_ : int = 10
lowerCAmelCase_ : int = self.tokenizer(lowercase__ ,max_length=lowercase__ ,truncation=lowercase__ ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,lowercase__ )
self.assertEqual(len(lowercase__ ) ,lowercase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) ,[5_00_04, 5_00_01] )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase__ )
lowerCAmelCase_ : List[Any] = PLBartTokenizer.from_pretrained(lowercase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowercase__ )
@require_torch
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=lowercase__ ,return_tensors="pt" )
lowerCAmelCase_ : Any = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] ,lowercase__ )
self.assertEqual(batch.decoder_input_ids[1][-1] ,2 )
self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] )
@require_torch
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=lowercase__ ,truncation=lowercase__ ,max_length=len(self.expected_src_tokens ) ,return_tensors="pt" ,)
lowerCAmelCase_ : List[str] = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase__ ,lowercase__ )
self.assertEqual((2, 26) ,batch.input_ids.shape )
self.assertEqual((2, 26) ,batch.attention_mask.shape )
lowerCAmelCase_ : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,lowercase__ )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.tokenizer(self.src_text ,padding=lowercase__ ,truncation=lowercase__ ,max_length=3 ,return_tensors="pt" )
lowerCAmelCase_ : Tuple = self.tokenizer(
text_target=self.tgt_text ,padding=lowercase__ ,truncation=lowercase__ ,max_length=10 ,return_tensors="pt" )
lowerCAmelCase_ : Optional[int] = targets['''input_ids''']
lowerCAmelCase_ : Optional[Any] = shift_tokens_right(lowercase__ ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : int = self.tokenizer._build_translation_inputs(
"A test" ,return_tensors="pt" ,src_lang="en_XX" ,tgt_lang="java" )
self.assertEqual(
nested_simplify(lowercase__ ) ,{
# A, test, EOS, en_XX
"input_ids": [[1_50, 2_42, 2, 5_00_03]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_00_01,
} ,)
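# A minimal usage sketch, assuming network access to download the
# checkpoint referenced in the tests above:
#
# from transformers import PLBartTokenizer
# tok = PLBartTokenizer.from_pretrained(
#     "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX")
# batch = tok(["def f(a,b):NEW_LINE_INDENTreturn a+b"], return_tensors="pt")
# # input_ids end with [eos, __python__], mirroring `suffix_tokens` above.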
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_lowercase = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
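# A simplified sketch of the lazy-import pattern `_LazyModule` implements
# above (not the actual transformers implementation): submodules are only
# imported when one of the names they provide is first accessed.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public name to the submodule that actually defines it.
        self._name_to_module = {
            public_name: submodule
            for submodule, names in import_structure.items()
            for public_name in names
        }

    def __getattr__(self, attr):
        # Deferred import: nothing heavy is loaded until this line runs.
        module = importlib.import_module(self._name_to_module[attr])
        return getattr(module, attr)


# e.g. _TinyLazyModule("demo", {"json": ["dumps"]}).dumps({"ok": True})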
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = {}
lowerCAmelCase_ : Optional[Any] = job["""started_at"""]
lowerCAmelCase_ : Tuple = job["""completed_at"""]
lowerCAmelCase_ : Tuple = date_parser.parse(SCREAMING_SNAKE_CASE__)
lowerCAmelCase_ : Union[str, Any] = date_parser.parse(SCREAMING_SNAKE_CASE__)
lowerCAmelCase_ : Any = round((end_datetime - start_datetime).total_seconds() / 60.0)
lowerCAmelCase_ : int = start
lowerCAmelCase_ : str = end
lowerCAmelCase_ : Optional[Any] = duration_in_min
return job_info
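# Illustrative check of the duration math (timestamps are made up):
# {"started_at": "2023-01-01T10:00:00Z", "completed_at": "2023-01-01T10:12:30Z"}
# spans 750 seconds, and round(750 / 60.0) reports it as 12 minutes (note
# that round() uses banker's rounding on the .5 boundary).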
def UpperCamelCase ( snake_case__ , snake_case__=None):
lowerCAmelCase_ : List[str] = None
if token is not None:
lowerCAmelCase_ : Optional[int] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowerCAmelCase_ : Optional[int] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
lowerCAmelCase_ : Optional[Any] = requests.get(SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__).json()
lowerCAmelCase_ : Optional[int] = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(SCREAMING_SNAKE_CASE__) for job in result["jobs"]})
lowerCAmelCase_ : Optional[int] = math.ceil((result["total_count"] - 1_00) / 1_00)
for i in range(SCREAMING_SNAKE_CASE__):
lowerCAmelCase_ : Optional[int] = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE__).json()
job_time.update({job["name"]: extract_time_from_single_job(SCREAMING_SNAKE_CASE__) for job in result["jobs"]})
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')
return {}
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
_lowercase = parser.parse_args()
_lowercase = get_job_time(args.workflow_run_id)
_lowercase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"{k}: {v['duration']}")
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
_lowercase = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
_lowercase = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCamelCase ( ):
lowerCAmelCase_ : str = (
list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
)
lowerCAmelCase_ : Tuple = bs[:]
lowerCAmelCase_ : Dict = 0
for b in range(2**8):
if b not in bs:
bs.append(snake_case__)
cs.append(2**8 + n)
n += 1
lowerCAmelCase_ : Union[str, Any] = [chr(snake_case__) for n in cs]
return dict(zip(snake_case__ , snake_case__))
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = set()
lowerCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowerCAmelCase_ : Union[str, Any] = char
return pairs
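# Illustrative values: bytes_to_unicode() maps the space byte (0x20) to "Ġ",
# which is why RoBERTa-style vocabularies spell " world" as "Ġworld"; and
# get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} — the candidate merges
# that the BPE loop below ranks against self.bpe_ranks.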
class __snake_case ( PreTrainedTokenizer ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : str ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any]="replace" ,lowerCAmelCase__ : Dict="<s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : Optional[Any]="<s>" ,lowerCAmelCase__ : List[Any]="<unk>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : int="<mask>" ,lowerCAmelCase__ : Any=False ,**lowerCAmelCase__ : int ,) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : Dict = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[Any] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : List[Any] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[Any] = bytes_to_unicode()
lowerCAmelCase_ : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Dict = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Optional[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowerCAmelCase_ : Dict = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ : Dict = bigram
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
lowerCAmelCase_ : Optional[int] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ : Tuple = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ : Optional[Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = " ".join(lowerCAmelCase__ )
lowerCAmelCase_ : Any = word
return word
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Dict = []
for token in re.findall(self.pat ,lowerCAmelCase__ ):
lowerCAmelCase_ : List[str] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[Any] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowerCAmelCase_ : Tuple = 0
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowerCAmelCase_ : Optional[Any] = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
lowerCAmelCase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowerCAmelCase_ : Union[str, Any] = " " + text
return (text, kwargs)
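# A minimal, self-contained sketch of the BPE merge loop implemented above,
# using a tiny hypothetical merge-rank table rather than a real GPT-2 merges
# file. Lower rank means the pair is merged earlier.
def _toy_bpe_sketch(token: str) -> str:
    ranks = {("l", "o"): 0, ("lo", "w"): 1}  # hypothetical bpe_ranks
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:  # no known merge left
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):  # replace every (first, second) occurrence
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)  # _toy_bpe_sketch("low") -> "low"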
| 683 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Any ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[str] = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
lowerCAmelCase_ : Any = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(__snake_case ) ,torch_builtin(__snake_case ) ) )
self.assertFalse(torch.allclose(gelu_python(__snake_case ) ,gelu_new(__snake_case ) ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : int = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
lowerCAmelCase_ : List[Any] = get_activation("gelu" )
lowerCAmelCase_ : Any = get_activation("gelu_10" )
lowerCAmelCase_ : int = torch_builtin(__snake_case )
lowerCAmelCase_ : Any = geluaa(__snake_case )
lowerCAmelCase_ : Dict = torch.where(y_gelu_aa < 10.0 ,1 ,0 )
self.assertTrue(torch.max(__snake_case ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask ,y_gelu_aa * clipped_mask ) )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__snake_case ):
get_activation("bogus" )
with self.assertRaises(__snake_case ):
get_activation(__snake_case )
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = get_activation("gelu" )
lowerCAmelCase_ : Optional[Any] = 1
lowerCAmelCase_ : List[str] = get_activation("gelu" )
self.assertEqual(acta.a ,1 )
with self.assertRaises(__snake_case ):
lowerCAmelCase_ : Dict = acta.a
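# For reference, a minimal sketch of the tanh-based GELU approximation that the
# "gelu_new" activation tested above implements (standard constants):
import math

def _gelu_tanh_sketch(x: float) -> float:
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))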
| 705 |
from collections.abc import Iterable
from typing import Any
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : int | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Node | None = None # Added in order to delete a node easier
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} ,indent=1 )
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Node | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = root
def __str__( self : Dict ) -> str:
'''simple docstring'''
return str(self.root )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Node ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCAmelCase_ : Optional[int] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCAmelCase__ ): # If it is the right children
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : Any = new_children
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Node ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase_ ( self : List[str] ) -> bool:
'''simple docstring'''
return self.root is None
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Union[str, Any] ) -> None:
'''simple docstring'''
lowerCAmelCase_ : str = Node(lowerCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase_ : Optional[int] = new_node # set its root
else: # Tree is not empty
lowerCAmelCase_ : List[Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase_ : Dict = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase_ : List[str] = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase_ : Dict = new_node
break
else:
lowerCAmelCase_ : str = parent_node.right
lowerCAmelCase_ : Optional[int] = parent_node
def UpperCAmelCase_ ( self : int ,*lowerCAmelCase__ : Tuple ) -> None:
'''simple docstring'''
for value in values:
self.__insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Optional[int] ) -> Node | None:
'''simple docstring'''
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
lowerCAmelCase_ : Dict = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase_ : Union[str, Any] = node.left if value < node.value else node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase_ : Union[str, Any] = node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
lowerCAmelCase_ : Dict = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase_ : Dict = self.root
while node.left is not None:
lowerCAmelCase_ : Union[str, Any] = node.left
return node
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : int ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.search(lowerCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCAmelCase__ ,lowerCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCAmelCase__ ,node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCAmelCase__ ,node.left )
else:
lowerCAmelCase_ : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase_ : Any = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Node | None ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Dict=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : list ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if node:
self.inorder(lowerCAmelCase__ ,node.left )
arr.append(node.value )
self.inorder(lowerCAmelCase__ ,node.right )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Node ) -> int:
'''simple docstring'''
lowerCAmelCase_ : list[int] = []
self.inorder(lowerCAmelCase__ ,lowerCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
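# A minimal standalone sketch of why the k-th smallest lookup above works: an
# inorder traversal of a binary search tree visits values in ascending order,
# so indexing the traversal at k - 1 yields the k-th smallest element. Nodes
# are modelled here as hypothetical (left, value, right) tuples.
def _inorder_sketch(node):
    if node is None:
        return []
    left, value, right = node
    return _inorder_sketch(left) + [value] + _inorder_sketch(right)

# _inorder_sketch(((None, 1, None), 3, (None, 6, None))) -> [1, 3, 6]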
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = []
if curr_node is not None:
lowerCAmelCase_ : Dict = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
return node_list
def UpperCamelCase ( ):
lowerCAmelCase_ : Tuple = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase_ : Tuple = BinarySearchTree()
for i in testlist:
t.insert(snake_case__)
# Prints all the elements of the list in order traversal
print(snake_case__)
if t.search(6) is not None:
print("The value 6 exists")
else:
print("The value 6 doesn't exist")
if t.search(-1) is not None:
print("The value -1 exists")
else:
print("The value -1 doesn't exist")
if not t.empty():
print("Max Value: " , t.get_max().value) # type: ignore
print("Min Value: " , t.get_min().value) # type: ignore
for i in testlist:
t.remove(snake_case__)
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : int=7 ,lowerCAmelCase__ : List[str]=3 ,lowerCAmelCase__ : Optional[int]=18 ,lowerCAmelCase__ : Union[str, Any]=30 ,lowerCAmelCase__ : List[Any]=4_00 ,lowerCAmelCase__ : Any=True ,lowerCAmelCase__ : int=None ,lowerCAmelCase__ : Union[str, Any]=True ,lowerCAmelCase__ : Dict=None ,lowerCAmelCase__ : Any=True ,lowerCAmelCase__ : Optional[int]=[0.5, 0.5, 0.5] ,lowerCAmelCase__ : int=[0.5, 0.5, 0.5] ,lowerCAmelCase__ : Optional[Any]=False ,) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = size if size is not None else {"height": 20, "width": 20}
lowerCAmelCase_ : Any = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCAmelCase_ : Union[str, Any] = parent
lowerCAmelCase_ : Optional[Any] = batch_size
lowerCAmelCase_ : Optional[Any] = num_channels
lowerCAmelCase_ : Union[str, Any] = image_size
lowerCAmelCase_ : Any = min_resolution
lowerCAmelCase_ : Tuple = max_resolution
lowerCAmelCase_ : int = do_resize
lowerCAmelCase_ : int = size
lowerCAmelCase_ : str = do_center_crop
lowerCAmelCase_ : int = crop_size
lowerCAmelCase_ : Any = do_normalize
lowerCAmelCase_ : str = image_mean
lowerCAmelCase_ : Tuple = image_std
lowerCAmelCase_ : Dict = do_reduce_labels
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCamelCase ( ):
lowerCAmelCase_ : List[str] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test")
lowerCAmelCase_ : Dict = Image.open(dataset[0]["file"])
lowerCAmelCase_ : List[str] = Image.open(dataset[1]["file"])
return image, map
def UpperCamelCase ( ):
lowerCAmelCase_ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test")
lowerCAmelCase_ : int = Image.open(ds[0]["file"])
lowerCAmelCase_ : Dict = Image.open(ds[1]["file"])
lowerCAmelCase_ : Optional[int] = Image.open(ds[2]["file"])
lowerCAmelCase_ : Optional[int] = Image.open(ds[3]["file"])
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = BeitImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : str = BeitImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : Dict ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ ,"do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ ,"size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ ,"do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ ,"center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ ,"do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ ,"image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ ,"image_std" ) )
def UpperCAmelCase_ ( self : str ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=lowerCAmelCase__ )
self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> str:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ ,Image.Image )
# Test not batched input
lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
lowerCAmelCase_ : Optional[int] = image_processing(lowerCAmelCase__ ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCAmelCase__ ,numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ ,np.ndarray )
# Test not batched input
lowerCAmelCase_ : int = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
lowerCAmelCase_ : Tuple = image_processing(lowerCAmelCase__ ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCAmelCase__ ,torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ ,torch.Tensor )
# Test not batched input
lowerCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
lowerCAmelCase_ : Dict = image_processing(lowerCAmelCase__ ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCAmelCase__ ,torchify=lowerCAmelCase__ )
lowerCAmelCase_ : str = []
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowerCAmelCase_ : Dict = image_processing(image_inputs[0] ,maps[0] ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched
lowerCAmelCase_ : Any = image_processing(lowerCAmelCase__ ,lowerCAmelCase__ ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test not batched input (PIL images)
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = prepare_semantic_single_inputs()
lowerCAmelCase_ : int = image_processing(lowerCAmelCase__ ,lowerCAmelCase__ ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched input (PIL images)
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = prepare_semantic_batch_inputs()
lowerCAmelCase_ : Any = image_processing(lowerCAmelCase__ ,lowerCAmelCase__ ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
def UpperCAmelCase_ ( self : Dict ) -> int:
'''simple docstring'''
lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = prepare_semantic_single_inputs()
lowerCAmelCase_ : str = image_processing(lowerCAmelCase__ ,lowerCAmelCase__ ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 1_50 )
lowerCAmelCase_ : int = True
lowerCAmelCase_ : Union[str, Any] = image_processing(lowerCAmelCase__ ,lowerCAmelCase__ ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
| 706 |
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : str = "" ,lowerCAmelCase__ : bool = False ) -> None:
'''simple docstring'''
lowerCAmelCase_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase_ : int = is_leaf
lowerCAmelCase_ : Optional[Any] = prefix
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ) -> tuple[str, str, str]:
'''simple docstring'''
lowerCAmelCase_ : Any = 0
for q, w in zip(self.prefix ,lowerCAmelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : list[str] ) -> None:
'''simple docstring'''
for word in words:
self.insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
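        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf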
if self.prefix == word:
lowerCAmelCase_ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase_ : List[Any] = RadixNode(prefix=lowerCAmelCase__ ,is_leaf=lowerCAmelCase__ )
else:
lowerCAmelCase_ : Tuple = self.nodes[word[0]]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = incoming_node.match(
lowerCAmelCase__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
            # Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase_ : Optional[int] = remaining_prefix
lowerCAmelCase_ : Optional[int] = self.nodes[matching_string[0]]
lowerCAmelCase_ : List[Any] = RadixNode(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = aux_node
if remaining_word == "":
lowerCAmelCase_ : List[str] = True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = incoming_node.match(
lowerCAmelCase__ )
            # If there is remaining prefix, the word can't be in the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : int = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = incoming_node.match(
lowerCAmelCase__ )
            # If there is remaining prefix, the word can't be in the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase_ : str = list(self.nodes.values() )[0]
lowerCAmelCase_ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase_ : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase_ : Optional[Any] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase_ : Tuple = list(incoming_node.nodes.values() )[0]
lowerCAmelCase_ : Union[str, Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase_ : str = merging_node.nodes
return True
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : int = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height ,self.prefix ," (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
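# A minimal standalone sketch of the three-way split that match() performs: it
# returns the longest common prefix, the leftover of the node prefix, and the
# leftover of the word.
def _match_sketch(prefix: str, word: str) -> tuple[str, str, str]:
    x = 0
    while x < min(len(prefix), len(word)) and prefix[x] == word[x]:
        x += 1
    return prefix[:x], prefix[x:], word[x:]

# _match_sketch("banana", "band") -> ("ban", "ana", "d")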
def UpperCamelCase ( ):
lowerCAmelCase_ : Dict = "banana bananas bandana band apple all beast".split()
lowerCAmelCase_ : List[Any] = RadixNode()
root.insert_many(snake_case__)
assert all(root.find(snake_case__) for word in words)
assert not root.find("bandanas")
assert not root.find("apps")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def UpperCamelCase ( ):
assert test_trie()
def UpperCamelCase ( ):
lowerCAmelCase_ : List[str] = RadixNode()
lowerCAmelCase_ : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(snake_case__)
print("Words:" , snake_case__)
print("Tree:")
root.print_tree()
if __name__ == "__main__":
main()
| 683 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention"):
lowerCAmelCase_ : List[Any] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :])
lowerCAmelCase_ : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2])
lowerCAmelCase_ : Optional[Any] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :])
lowerCAmelCase_ : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2])
lowerCAmelCase_ : str = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :])
lowerCAmelCase_ : int = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2])
lowerCAmelCase_ : Dict = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :])
lowerCAmelCase_ : Optional[Any] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2])
return k, o, q, v
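# A sketch (with made-up shapes) of the head-merging reshape above: a T5X
# attention kernel sliced per layer has shape (d_model, n_heads, d_head) and is
# flattened into the (d_model, n_heads * d_head) matrix a dense layer expects:
#   _k = np.zeros((4, 2, 3))                                  # hypothetical kernel
#   _k.reshape(_k.shape[0], _k.shape[1] * _k.shape[2]).shape  # -> (4, 6)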
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False):
if split_mlp_wi:
lowerCAmelCase_ : Dict = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
lowerCAmelCase_ : Optional[Any] = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
lowerCAmelCase_ : Union[str, Any] = (wi_a, wi_a)
else:
lowerCAmelCase_ : Optional[Any] = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
lowerCAmelCase_ : int = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def UpperCamelCase ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False):
lowerCAmelCase_ : int = traverse_util.flatten_dict(variables["target"])
lowerCAmelCase_ : str = {"/".join(lowercase_): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowerCAmelCase_ : Union[str, Any] = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , lowercase_)
lowerCAmelCase_ : List[Any] = collections.OrderedDict()
# Shared embeddings.
lowerCAmelCase_ : str = old["token_embedder/embedding"]
# Encoder.
for i in range(lowercase_):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ : int = tax_layer_norm_lookup(lowercase_ , lowercase_ , "encoder" , "pre_attention_layer_norm")
lowerCAmelCase_ : str = tax_attention_lookup(lowercase_ , lowercase_ , "encoder" , "attention")
lowerCAmelCase_ : List[Any] = layer_norm
lowerCAmelCase_ : str = k.T
lowerCAmelCase_ : Optional[Any] = o.T
lowerCAmelCase_ : List[str] = q.T
lowerCAmelCase_ : int = v.T
# Block i, layer 1 (MLP).
lowerCAmelCase_ : Dict = tax_layer_norm_lookup(lowercase_ , lowercase_ , "encoder" , "pre_mlp_layer_norm")
lowerCAmelCase_ : int = tax_mlp_lookup(lowercase_ , lowercase_ , "encoder" , lowercase_)
lowerCAmelCase_ : int = layer_norm
if split_mlp_wi:
lowerCAmelCase_ : Optional[int] = wi[0].T
lowerCAmelCase_ : Optional[Any] = wi[1].T
else:
lowerCAmelCase_ : Any = wi.T
lowerCAmelCase_ : Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCAmelCase_ : Optional[int] = tax_relpos_bias_lookup(
lowercase_ , lowercase_ , "encoder").T
lowerCAmelCase_ : str = old["encoder/encoder_norm/scale"]
if not scalable_attention:
lowerCAmelCase_ : Any = tax_relpos_bias_lookup(
lowercase_ , 0 , "encoder").T
lowerCAmelCase_ : str = tax_relpos_bias_lookup(
lowercase_ , 0 , "decoder").T
if not is_encoder_only:
# Decoder.
for i in range(lowercase_):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ : List[Any] = tax_layer_norm_lookup(lowercase_ , lowercase_ , "decoder" , "pre_self_attention_layer_norm")
lowerCAmelCase_ : Any = tax_attention_lookup(lowercase_ , lowercase_ , "decoder" , "self_attention")
lowerCAmelCase_ : List[str] = layer_norm
lowerCAmelCase_ : Union[str, Any] = k.T
lowerCAmelCase_ : str = o.T
lowerCAmelCase_ : List[str] = q.T
lowerCAmelCase_ : Union[str, Any] = v.T
# Block i, layer 1 (Cross Attention).
lowerCAmelCase_ : Dict = tax_layer_norm_lookup(lowercase_ , lowercase_ , "decoder" , "pre_cross_attention_layer_norm")
lowerCAmelCase_ : int = tax_attention_lookup(lowercase_ , lowercase_ , "decoder" , "encoder_decoder_attention")
lowerCAmelCase_ : List[str] = layer_norm
lowerCAmelCase_ : str = k.T
lowerCAmelCase_ : List[Any] = o.T
lowerCAmelCase_ : int = q.T
lowerCAmelCase_ : List[Any] = v.T
# Block i, layer 2 (MLP).
lowerCAmelCase_ : Tuple = tax_layer_norm_lookup(lowercase_ , lowercase_ , "decoder" , "pre_mlp_layer_norm")
lowerCAmelCase_ : Tuple = tax_mlp_lookup(lowercase_ , lowercase_ , "decoder" , lowercase_)
lowerCAmelCase_ : Optional[int] = layer_norm
if split_mlp_wi:
lowerCAmelCase_ : List[Any] = wi[0].T
lowerCAmelCase_ : Optional[int] = wi[1].T
else:
lowerCAmelCase_ : str = wi.T
lowerCAmelCase_ : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCAmelCase_ : Optional[int] = tax_relpos_bias_lookup(lowercase_ , lowercase_ , "decoder").T
lowerCAmelCase_ : Tuple = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCAmelCase_ : Any = old["decoder/logits_dense/kernel"].T
return new
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ : str = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ : Optional[int] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head.")
lowerCAmelCase_ : int = state_dict["shared.weight"]
return state_dict
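# Sketch of the weight tying above: T5-family checkpoints store one shared
# embedding matrix, so encoder/decoder embed_tokens (and, for v1.0 models,
# lm_head) are all pointed at "shared.weight" when not saved separately.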
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : int = checkpoints.load_tax_checkpoint(lowercase_)
lowerCAmelCase_ : List[str] = convert_tax_to_pytorch(
lowercase_ , num_layers=config.num_layers , is_encoder_only=lowercase_ , scalable_attention=lowercase_)
lowerCAmelCase_ : Union[str, Any] = make_state_dict(lowercase_ , lowercase_)
model.load_state_dict(lowercase_ , strict=lowercase_)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ):
lowerCAmelCase_ : int = MTaConfig.from_json_file(lowercase_)
print(F'''Building PyTorch model from configuration: {config}''')
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowerCAmelCase_ : Optional[int] = UMTaEncoderModel(lowercase_)
else:
lowerCAmelCase_ : Optional[int] = UMTaForConditionalGeneration(lowercase_)
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''')
model.save_pretrained(lowercase_)
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase_)
print("Done")
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
_lowercase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 707 |
from __future__ import annotations
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor")
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor")
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor")
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
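# A quick numeric check of the mass-action law the function above encodes,
# n * p = n_i**2 (illustrative values): with electron_conc = 25 and
# intrinsic_conc = 5, the missing hole concentration is 5**2 / 25 = 1.0.
def _mass_action_sketch(electron_conc: float, intrinsic_conc: float) -> float:
    return intrinsic_conc**2 / electron_conc  # hole concentration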
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_lowercase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_lowercase = logging.getLogger()
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("-f")
lowerCAmelCase_ : Any = parser.parse_args()
return args.f
def UpperCamelCase ( snake_case__ , snake_case__="eval"):
lowerCAmelCase_ : Optional[Any] = os.path.join(__UpperCamelCase , F'''{split}_results.json''')
if os.path.exists(__UpperCamelCase):
with open(__UpperCamelCase , "r") as f:
return json.load(__UpperCamelCase)
raise ValueError(F'''can\'t find {path}''')
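# For orientation, get_results() above expects each example script to write a
# JSON file such as eval_results.json; illustrative (made-up) contents:
# {"eval_accuracy": 0.78, "eval_loss": 0.52}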
_lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __snake_case ( UpperCAmelCase_ ):
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Union[str, Any] = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(_snake_case ,"argv" ,_snake_case ):
run_flax_glue.main()
lowerCAmelCase_ : int = get_results(_snake_case )
self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Optional[int] = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_snake_case ,"argv" ,_snake_case ):
run_clm_flax.main()
lowerCAmelCase_ : Any = get_results(_snake_case )
self.assertLess(result["eval_perplexity"] ,1_00 )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Any = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(_snake_case ,"argv" ,_snake_case ):
run_summarization_flax.main()
lowerCAmelCase_ : Tuple = get_results(_snake_case ,split="test" )
self.assertGreaterEqual(result["test_rouge1"] ,10 )
self.assertGreaterEqual(result["test_rouge2"] ,2 )
self.assertGreaterEqual(result["test_rougeL"] ,7 )
self.assertGreaterEqual(result["test_rougeLsum"] ,7 )
@slow
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : List[Any] = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(_snake_case ,"argv" ,_snake_case ):
run_mlm_flax.main()
lowerCAmelCase_ : Union[str, Any] = get_results(_snake_case )
self.assertLess(result["eval_perplexity"] ,42 )
@slow
def UpperCAmelCase_ ( self : Dict ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Dict = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_snake_case ,"argv" ,_snake_case ):
run_ta_mlm_flax.main()
lowerCAmelCase_ : int = get_results(_snake_case )
self.assertGreaterEqual(result["eval_accuracy"] ,0.42 )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = 7 if get_gpu_count() > 1 else 2
lowerCAmelCase_ : List[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Union[str, Any] = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(_snake_case ,"argv" ,_snake_case ):
run_flax_ner.main()
lowerCAmelCase_ : Tuple = get_results(_snake_case )
self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
self.assertGreaterEqual(result["eval_f1"] ,0.3 )
@slow
def UpperCAmelCase_ ( self : List[str] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : int = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : str = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(_snake_case ,"argv" ,_snake_case ):
run_qa.main()
lowerCAmelCase_ : List[Any] = get_results(_snake_case )
self.assertGreaterEqual(result["eval_f1"] ,30 )
self.assertGreaterEqual(result["eval_exact"] ,30 )
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
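# Illustrative behaviour (hypothetical session): with _LazyModule, importing
# the package is cheap, and torch-dependent submodules load only on attribute
# access, e.g. `from transformers.models.git import GitModel` triggers the
# actual import of modeling_git.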
| 683 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __snake_case :
"""simple docstring"""
def __init__( self : int ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : int = 13 ,lowerCAmelCase__ : int = 64 ,lowerCAmelCase__ : int = 2 ,lowerCAmelCase__ : int = 3 ,lowerCAmelCase__ : int = 3 ,lowerCAmelCase__ : bool = True ,lowerCAmelCase__ : bool = True ,lowerCAmelCase__ : int = 1_28 ,lowerCAmelCase__ : Optional[int]=[16, 32, 64, 1_28] ,lowerCAmelCase__ : int = 7 ,lowerCAmelCase__ : int = 4 ,lowerCAmelCase__ : int = 37 ,lowerCAmelCase__ : str = "gelu" ,lowerCAmelCase__ : float = 0.1 ,lowerCAmelCase__ : float = 0.1 ,lowerCAmelCase__ : int = 10 ,lowerCAmelCase__ : float = 0.02 ,lowerCAmelCase__ : int = 2 ,lowerCAmelCase__ : int = 1 ,lowerCAmelCase__ : int = 1_28 ,lowerCAmelCase__ : List[int] = [2, 2, 2, 2] ,lowerCAmelCase__ : int = 2 ,lowerCAmelCase__ : int = 2 ,) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Optional[Any] = batch_size
lowerCAmelCase_ : List[str] = image_size
lowerCAmelCase_ : Optional[int] = patch_size
lowerCAmelCase_ : Optional[Any] = num_channels
lowerCAmelCase_ : Optional[int] = is_training
lowerCAmelCase_ : Dict = use_labels
lowerCAmelCase_ : Optional[int] = hidden_size
lowerCAmelCase_ : Union[str, Any] = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Tuple = intermediate_size
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : Tuple = hidden_dropout_prob
lowerCAmelCase_ : Dict = attention_probs_dropout_prob
lowerCAmelCase_ : List[str] = type_sequence_label_size
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : Optional[Any] = encoder_stride
lowerCAmelCase_ : Optional[int] = num_attention_outputs
lowerCAmelCase_ : Dict = embed_dim
lowerCAmelCase_ : Dict = embed_dim + 1
lowerCAmelCase_ : List[str] = resolution
lowerCAmelCase_ : List[Any] = depths
lowerCAmelCase_ : List[Any] = hidden_sizes
lowerCAmelCase_ : List[Any] = dim
lowerCAmelCase_ : int = mlp_expansion_ratio
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : List[str] = None
if self.use_labels:
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,resolution=self.resolution ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,dim=self.dim ,mlp_expansion_ratio=self.mlp_expansion_ratio ,)
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = TFEfficientFormerModel(config=_lowerCAmelCase )
lowerCAmelCase_ : str = model(_lowerCAmelCase ,training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : int = self.type_sequence_label_size
lowerCAmelCase_ : Dict = TFEfficientFormerForImageClassification(_lowerCAmelCase )
lowerCAmelCase_ : List[Any] = model(_lowerCAmelCase ,labels=_lowerCAmelCase ,training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : List[str] = TFEfficientFormerForImageClassification(_lowerCAmelCase )
lowerCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ : str = model(_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = config_and_inputs
lowerCAmelCase_ : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = TFEfficientFormerModelTester(self )
lowerCAmelCase_ : Any = ConfigTester(
self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def UpperCAmelCase_ ( self : int ) -> str:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : int = model_class(_lowerCAmelCase )
lowerCAmelCase_ : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,_lowerCAmelCase )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ : int ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : List[Any] ):
lowerCAmelCase_ : str = model_class(_lowerCAmelCase )
lowerCAmelCase_ : List[Any] = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ,training=_lowerCAmelCase )
lowerCAmelCase_ : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ : Union[str, Any] = getattr(
self.model_tester ,"expected_num_hidden_layers" ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase )
if hasattr(self.model_tester ,"encoder_seq_length" ):
lowerCAmelCase_ : Dict = self.model_tester.encoder_seq_length
if hasattr(self.model_tester ,"chunk_length" ) and self.model_tester.chunk_length > 1:
lowerCAmelCase_ : Tuple = seq_length * self.model_tester.chunk_length
else:
lowerCAmelCase_ : Union[str, Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
if config.is_encoder_decoder:
lowerCAmelCase_ : List[str] = outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase ,(list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase )
lowerCAmelCase_ : Dict = getattr(self.model_tester ,"seq_length" ,_lowerCAmelCase )
lowerCAmelCase_ : Any = getattr(self.model_tester ,"decoder_seq_length" ,_lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[decoder_seq_length, self.model_tester.hidden_size] ,)
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = True
check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : List[Any] = True
check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[Any]=False ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = super()._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
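    # Note (sketch): the "WithTeacher" variant returns class + distillation
    # logits for inference only and takes no labels, hence the key is dropped.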
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[Any] = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def UpperCAmelCase_ ( self : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = getattr(self.model_tester ,"seq_length" ,_lowerCAmelCase )
lowerCAmelCase_ : List[str] = getattr(self.model_tester ,"encoder_seq_length" ,_lowerCAmelCase )
lowerCAmelCase_ : List[str] = getattr(self.model_tester ,"key_length" ,_lowerCAmelCase )
lowerCAmelCase_ : str = getattr(self.model_tester ,"chunk_length" ,_lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester ,"num_hashes" ):
lowerCAmelCase_ : str = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = True
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Union[str, Any] = model_class(_lowerCAmelCase )
lowerCAmelCase_ : List[str] = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ,training=_lowerCAmelCase )
lowerCAmelCase_ : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) ,self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : List[Any] = model_class(_lowerCAmelCase )
lowerCAmelCase_ : List[Any] = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ,training=_lowerCAmelCase )
lowerCAmelCase_ : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) ,self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] ,)
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] ,)
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCAmelCase_ : List[Any] = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCAmelCase_ : Optional[int] = {
key: tf.keras.Input(shape=val.shape[1:] ,dtype=val.dtype ,name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCAmelCase_ : Optional[int] = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def UpperCamelCase ( ):
lowerCAmelCase_ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[str] = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
lowerCAmelCase_ : List[Any] = self.default_image_processor
lowerCAmelCase_ : int = prepare_img()
lowerCAmelCase_ : int = image_processor(images=_lowerCAmelCase ,return_tensors="tf" )
# forward pass
lowerCAmelCase_ : Dict = model(**_lowerCAmelCase ,training=_lowerCAmelCase )
# verify the logits
lowerCAmelCase_ : Any = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,_lowerCAmelCase )
lowerCAmelCase_ : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
lowerCAmelCase_ : List[Any] = self.default_image_processor
lowerCAmelCase_ : int = prepare_img()
lowerCAmelCase_ : List[str] = image_processor(images=_lowerCAmelCase ,return_tensors="tf" )
# forward pass
lowerCAmelCase_ : Dict = model(**_lowerCAmelCase ,training=_lowerCAmelCase )
# verify the logits
lowerCAmelCase_ : Optional[int] = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,_lowerCAmelCase )
lowerCAmelCase_ : Dict = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1e-4 ) )
| 709 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase ( ):
    lowerCAmelCase_ : List[str] = HfArgumentParser(TensorFlowBenchmarkArguments)
    lowerCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
    lowerCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=benchmark_args)
try:
lowerCAmelCase_ : Tuple = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowerCAmelCase_ : Union[str, Any] = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        lowerCAmelCase_ : Tuple = " ".join(str(e).split(" ")[:-1])
        lowerCAmelCase_ : Union[str, Any] = ""
        lowerCAmelCase_ : Optional[Any] = eval(str(e).split(" ")[-1])
lowerCAmelCase_ : Tuple = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:])
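                # e.g. a legacy "--no_cuda" flag produces the message
                # "Arg --no_cuda is no longer used, please use --no-cuda instead."
                # (illustrative; assumes "no_cuda" is among TensorFlowBenchmark.deprecated_args)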
else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            lowerCAmelCase_ : Optional[Any] = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
benchmark.run()
if __name__ == "__main__":
main()
| 683 | 0 |
'''simple docstring'''
def UpperCamelCase ( snake_case__):
    if len(snake_case__) <= 1:
        return [tuple(snake_case__)]
lowerCAmelCase_ : str = []
def generate(snake_case__ , snake_case__):
if k == 1:
res.append(tuple(arr[:]))
return
        generate(k - 1 , arr)
for i in range(k - 1):
if k % 2 == 0: # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else: # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr)
    generate(len(snake_case__) , snake_case__)
return res
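# For [1, 2, 3], Heap's algorithm emits the six permutations in the order
# (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1).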
if __name__ == "__main__":
_lowercase = input('''Enter numbers separated by a comma:\n''').strip()
_lowercase = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 710 |
_lowercase = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def UpperCamelCase ( snake_case__):
assert type(snake_case__) in (int, float) and decimal == int(snake_case__)
lowerCAmelCase_ : Optional[Any] = int(snake_case__)
lowerCAmelCase_ : Tuple = ""
lowerCAmelCase_ : str = False
if decimal < 0:
lowerCAmelCase_ : Tuple = True
decimal *= -1
while decimal > 0:
lowerCAmelCase_ , lowerCAmelCase_ : Any = divmod(snake_case__ , 16)
lowerCAmelCase_ : Dict = values[remainder] + hexadecimal
lowerCAmelCase_ : List[str] = "0x" + hexadecimal
if negative:
lowerCAmelCase_ : Optional[Any] = "-" + hexadecimal
return hexadecimal
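# Worked examples: UpperCamelCase(255) returns "0xff" and UpperCamelCase(-42) returns "-0x2a";
# note that UpperCamelCase(0) returns the bare prefix "0x" because the while-loop never runs.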
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCamelCase ( snake_case__):
return EnvironmentCommand()
def UpperCamelCase ( snake_case__):
return EnvironmentCommand(args.accelerate_config_file)
class __snake_case ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( lowerCAmelCase__ : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = parser.add_parser("env" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"--accelerate-config_file" ,default=UpperCamelCase__ ,help="The accelerate config file to use for the default values in the launching script." ,)
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self : List[Any] ,lowerCAmelCase__ : Tuple ,*lowerCAmelCase__ : str ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = accelerate_config_file
def UpperCAmelCase_ ( self : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Any = '''not installed'''
if is_safetensors_available():
import safetensors
lowerCAmelCase_ : str = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
lowerCAmelCase_ : int = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
lowerCAmelCase_ : List[Any] = '''not installed'''
lowerCAmelCase_ : Union[str, Any] = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
lowerCAmelCase_ : Union[str, Any] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
lowerCAmelCase_ : Dict = load_config_from_file(self._accelerate_config_file ).to_dict()
lowerCAmelCase_ : Optional[Any] = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ ,UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
lowerCAmelCase_ : List[str] = '''not installed'''
lowerCAmelCase_ : List[str] = '''NA'''
if is_torch_available():
import torch
lowerCAmelCase_ : Optional[Any] = torch.__version__
lowerCAmelCase_ : List[str] = torch.cuda.is_available()
lowerCAmelCase_ : int = '''not installed'''
lowerCAmelCase_ : str = '''NA'''
if is_tf_available():
import tensorflow as tf
lowerCAmelCase_ : Dict = tf.__version__
try:
# deprecated in v2.1
lowerCAmelCase_ : List[str] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
lowerCAmelCase_ : str = bool(tf.config.list_physical_devices("GPU" ) )
lowerCAmelCase_ : int = '''not installed'''
lowerCAmelCase_ : Tuple = '''not installed'''
lowerCAmelCase_ : str = '''not installed'''
lowerCAmelCase_ : Optional[int] = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
lowerCAmelCase_ : Optional[Any] = flax.__version__
lowerCAmelCase_ : int = jax.__version__
lowerCAmelCase_ : Any = jaxlib.__version__
lowerCAmelCase_ : List[Any] = jax.lib.xla_bridge.get_backend().platform
lowerCAmelCase_ : Tuple = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': f'''{safetensors_version}''',
'''Accelerate version''': f'''{accelerate_version}''',
'''Accelerate config''': f'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': f'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': f'''{flax_version} ({jax_backend})''',
'''Jax version''': f'''{jax_version}''',
'''JaxLib version''': f'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def UpperCAmelCase_ ( lowerCAmelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 711 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase = ['''text''', '''image''', '''audio''']
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input")
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((5_12, 5_12)))
elif input_type == "audio":
inputs.append(torch.ones(30_00))
        elif isinstance(input_type , list):
            inputs.append(create_inputs(input_type))
else:
raise ValueError(F'''Invalid type requested: {input_type}''')
return inputs
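# e.g. create_inputs(["text", "audio"]) returns ["Text input", torch.ones(3000)].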
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : List[Any] = []
for output in outputs:
        if isinstance(output , (str, AgentText)):
            output_types.append("text")
        elif isinstance(output , (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output , (torch.Tensor, AgentAudio)):
            output_types.append("audio")
else:
raise ValueError(F'''Invalid output: {output}''')
return output_types
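# e.g. output_types(["some text", torch.ones(3000)]) returns ["text", "audio"].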
@is_tool_test
class __snake_case :
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"inputs" ) )
self.assertTrue(hasattr(self.tool ,"outputs" ) )
lowerCAmelCase_ : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input ,lowerCAmelCase__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCAmelCase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCAmelCase_ : Optional[int] = [outputs]
self.assertListEqual(output_types(lowerCAmelCase__ ) ,self.tool.outputs )
def UpperCAmelCase_ ( self : int ) -> Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"description" ) )
self.assertTrue(hasattr(self.tool ,"default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : str = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase__ ,self.tool.outputs ):
lowerCAmelCase_ : Tuple = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Tuple = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = []
for _input, input_type in zip(lowerCAmelCase__ ,self.tool.inputs ):
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : int = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
| 683 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowercase = '''pt'''
elif is_tf_available():
_lowercase = '''tf'''
else:
_lowercase = '''jax'''
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ByTaTokenizer
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ : Dict = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def UpperCAmelCase_ ( self : Dict ,**lowerCAmelCase__ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Any=False ,lowerCAmelCase__ : Dict=20 ,lowerCAmelCase__ : List[Any]=5 ) -> Tuple[str, list]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = []
for i in range(len(_lowerCamelCase ) ):
try:
lowerCAmelCase_ : List[Any] = tokenizer.decode([i] ,clean_up_tokenization_spaces=_lowerCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
        lowerCAmelCase_ : List[Any] = list(filter(lambda t : re.match(R"^[ a-zA-Z]+$" ,t[1] ) ,_lowerCamelCase ) )
        lowerCAmelCase_ : Optional[int] = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=_lowerCamelCase ) ,_lowerCamelCase ) )
if max_length is not None and len(_lowerCamelCase ) > max_length:
lowerCAmelCase_ : Union[str, Any] = toks[:max_length]
if min_length is not None and len(_lowerCamelCase ) < min_length and len(_lowerCamelCase ) > 0:
while len(_lowerCamelCase ) < min_length:
lowerCAmelCase_ : Any = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase_ : Tuple = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase_ : Optional[Any] = tokenizer.decode(_lowerCamelCase ,clean_up_tokenization_spaces=_lowerCamelCase )
if " " not in output_txt and len(_lowerCamelCase ) > 1:
lowerCAmelCase_ : int = (
tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=_lowerCamelCase )
+ " "
+ tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=_lowerCamelCase )
)
if with_prefix_space:
lowerCAmelCase_ : Union[str, Any] = " " + output_txt
lowerCAmelCase_ : Optional[int] = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
return output_txt, output_ids
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Tuple = self.ta_base_tokenizer
lowerCAmelCase_ : Optional[int] = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowerCAmelCase_ : int = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"] ,batch_without_eos_added["input_ids"] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.ta_base_tokenizer
lowerCAmelCase_ : List[str] = "Unicode €."
lowerCAmelCase_ : Tuple = tokenizer(_lowerCamelCase )
lowerCAmelCase_ : Dict = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded["input_ids"] ,_lowerCamelCase )
# decoding
lowerCAmelCase_ : Tuple = tokenizer.decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,"Unicode €.</s>" )
lowerCAmelCase_ : Tuple = tokenizer("e è é ê ë" )
lowerCAmelCase_ : Union[str, Any] = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded["input_ids"] ,_lowerCamelCase )
# decoding
lowerCAmelCase_ : List[str] = tokenizer.decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,"e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) ,"e è é ê ë</s>" )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.ta_base_tokenizer
lowerCAmelCase_ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowerCAmelCase_ : List[str] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
lowerCAmelCase_ : int = tokenizer(_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
if FRAMEWORK != "jax":
lowerCAmelCase_ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase_ : Tuple = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual((2, 37) ,batch.input_ids.shape )
self.assertEqual((2, 37) ,batch.attention_mask.shape )
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = self.ta_base_tokenizer
lowerCAmelCase_ : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowerCAmelCase_ : Optional[Any] = tokenizer(_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors=_lowerCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" ,_lowerCamelCase )
self.assertIn("attention_mask" ,_lowerCamelCase )
self.assertNotIn("decoder_input_ids" ,_lowerCamelCase )
self.assertNotIn("decoder_attention_mask" ,_lowerCamelCase )
def UpperCAmelCase_ ( self : Dict ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.ta_base_tokenizer
lowerCAmelCase_ : Any = [
"Summary of the text.",
"Another summary.",
]
lowerCAmelCase_ : str = tokenizer(
text_target=_lowerCamelCase ,max_length=32 ,padding="max_length" ,truncation=_lowerCamelCase ,return_tensors=_lowerCamelCase )
self.assertEqual(32 ,targets["input_ids"].shape[1] )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.ta_base_tokenizer
lowerCAmelCase_ : Dict = ["A long paragraph for summarization. </s>"]
lowerCAmelCase_ : Tuple = ["Summary of the text. </s>"]
# fmt: off
lowerCAmelCase_ : Tuple = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
lowerCAmelCase_ : str = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
lowerCAmelCase_ : Optional[int] = tokenizer(_lowerCamelCase ,text_target=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,batch["input_ids"][0] )
self.assertEqual(_lowerCamelCase ,batch["labels"][0] )
def UpperCAmelCase_ ( self : int ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
lowerCAmelCase_ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : str = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = " He is very happy, UNwant\u00E9d,running"
lowerCAmelCase_ : List[Any] = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
lowerCAmelCase_ : List[Any] = tokenizer.__class__.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ : List[str] = after_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
lowerCAmelCase_ : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase_ : Dict = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowerCAmelCase_ : List[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowerCAmelCase_ : int = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = tokenizer.__class__.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ : Tuple = after_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertIn("new_additional_special_token" ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
lowerCAmelCase_ : Any = tokenizer.__class__.from_pretrained(_lowerCamelCase ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(_lowerCamelCase )
def UpperCAmelCase_ ( self : Any ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Any = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,"special_tokens_map.json" ) ,encoding="utf-8" ) as json_file:
lowerCAmelCase_ : List[Any] = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,"tokenizer_config.json" ) ,encoding="utf-8" ) as json_file:
lowerCAmelCase_ : List[str] = json.load(_lowerCamelCase )
lowerCAmelCase_ : List[str] = [f'''<extra_id_{i}>''' for i in range(1_25 )]
lowerCAmelCase_ : str = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowerCAmelCase_ : Dict = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_lowerCamelCase ,"special_tokens_map.json" ) ,"w" ,encoding="utf-8" ) as outfile:
json.dump(_lowerCamelCase ,_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,"tokenizer_config.json" ) ,"w" ,encoding="utf-8" ) as outfile:
json.dump(_lowerCamelCase ,_lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase_ : Tuple = tokenizer_class.from_pretrained(
_lowerCamelCase ,)
self.assertIn(
"an_additional_special_token" ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) ,)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ : Any = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" ,lstrip=_lowerCamelCase )]
lowerCAmelCase_ : Any = tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,)
self.assertIn("a_new_additional_special_token" ,tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] ,tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) ,)
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
lowerCAmelCase_ : Tuple = tokenizer_class.from_pretrained(_lowerCamelCase )
self.assertTrue(tokenizer.decode([2_55] ) == "" )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Tuple = self.get_tokenizers(fast=_lowerCamelCase ,do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase_ : Optional[int] = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowerCAmelCase_ : List[str] = tokenizer.convert_tokens_to_string(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase_ : Optional[int] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : List[Any] = tokenizer.convert_ids_to_tokens(
_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
for attr in attributes_list:
setattr(_lowerCamelCase ,attr + "_id" ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,attr + "_id" ) ,_lowerCamelCase )
setattr(_lowerCamelCase ,attr + "_id" ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,attr + "_id" ) ,_lowerCamelCase )
setattr(_lowerCamelCase ,"additional_special_tokens_ids" ,[] )
self.assertListEqual(getattr(_lowerCamelCase ,"additional_special_tokens" ) ,[] )
self.assertListEqual(getattr(_lowerCamelCase ,"additional_special_tokens_ids" ) ,[] )
setattr(_lowerCamelCase ,"additional_special_tokens_ids" ,[token_id_to_test_setters] )
self.assertListEqual(getattr(_lowerCamelCase ,"additional_special_tokens" ) ,[token_to_test_setters] )
                self.assertListEqual(getattr(_lowerCamelCase ,"additional_special_tokens_ids" ) ,[token_id_to_test_setters] )
 | 712 |
import pytest
_lowercase = '''__dummy_dataset1__'''
_lowercase = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def UpperCamelCase ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCamelCase ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : List[Any] = dataset_loading_script_name
lowerCAmelCase_ : List[str] = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
lowerCAmelCase_ : List[Any] = script_dir / F'''{script_name}.py'''
    with open(script_path , "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
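# Illustrative use (an assumption, not shown above): datasets.load_dataset(<returned dir>)
# can resolve the generated directory as a local dataset loading script.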
| 683 | 0 |
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = ""
for i in table:
res += inp[i - 1]
return res
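# e.g. apply_table("1010", [2, 4, 3, 1]) returns "0011": the table is a 1-indexed
# permutation/selection of the input bits.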
def UpperCamelCase ( snake_case__):
return data[1:] + data[0]
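# e.g. left_shift("10000") returns "00001" (one-bit circular left rotation).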
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Union[str, Any] = ""
    for i in range(len(a)):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
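# e.g. xor("1010", "0110") returns "1100" (bitwise XOR of equal-length bit strings).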
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : int = int("0b" + data[0] + data[-1] , 2)
lowerCAmelCase_ : Union[str, Any] = int("0b" + data[1:3] , 2)
return bin(s[row][col])[2:]
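# e.g. for data "1010": row = int("0b10", 2) = 2 (outer bits) and col = int("0b01", 2) = 1
# (inner bits), so the lookup returns bin(s[2][1])[2:].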
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Tuple = message[:4]
lowerCAmelCase_ : int = message[4:]
    lowerCAmelCase_ : int = apply_table(right , expansion)
    lowerCAmelCase_ : Union[str, Any] = xor(temp , keya)
    lowerCAmelCase_ : Tuple = apply_sbox(sa , temp[:4]) # noqa: E741
    lowerCAmelCase_ : List[str] = apply_sbox(sa , temp[4:])
    lowerCAmelCase_ : int = "0" * (2 - len(l)) + l # noqa: E741
    lowerCAmelCase_ : int = "0" * (2 - len(r)) + r
    lowerCAmelCase_ : Optional[Any] = apply_table(l + r , pa_table)
    lowerCAmelCase_ : Tuple = xor(left , temp)
return temp + right
if __name__ == "__main__":
_lowercase = input('''Enter 10 bit key: ''')
_lowercase = input('''Enter 8 bit message: ''')
_lowercase = [6, 3, 7, 4, 8, 5, 10, 9]
_lowercase = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_lowercase = [2, 4, 3, 1]
_lowercase = [2, 6, 3, 1, 4, 8, 5, 7]
_lowercase = [4, 1, 3, 5, 7, 2, 8, 6]
_lowercase = [4, 1, 2, 3, 2, 3, 4, 1]
_lowercase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_lowercase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_lowercase = apply_table(key, paa_table)
_lowercase = temp[:5]
_lowercase = temp[5:]
_lowercase = left_shift(left)
_lowercase = left_shift(right)
_lowercase = apply_table(left + right, pa_table)
_lowercase = left_shift(left)
_lowercase = left_shift(right)
_lowercase = left_shift(left)
_lowercase = left_shift(right)
_lowercase = apply_table(left + right, pa_table)
# encryption
_lowercase = apply_table(message, IP)
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = temp[4:] + temp[:4]
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
_lowercase = apply_table(CT, IP)
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = temp[4:] + temp[:4]
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
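# Note: in standard simplified DES, decryption applies the two round keys in the
# reverse order of encryption; the distinct keys are collapsed to "keya" by the
# renaming above, so that ordering is not visible in this listing.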
| 713 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = CodeGenTokenizer
UpperCamelCase_ = CodeGenTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = {'add_prefix_space': True}
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : str ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowerCAmelCase_ : int = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ : List[Any] = {"unk_token": "<unk>"}
lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : str ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = "lower newer"
lowerCAmelCase_ : Tuple = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
lowerCAmelCase_ : Dict = "lower newer"
lowerCAmelCase_ : Dict = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokens + [tokenizer.unk_token]
lowerCAmelCase_ : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = "lower newer"
# Testing tokenization
lowerCAmelCase_ : Tuple = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids without special tokens
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids with special tokens
lowerCAmelCase_ : int = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing the unknown token
lowerCAmelCase_ : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,*lowerCAmelCase__ : List[str] ,**lowerCAmelCase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Any=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
# Simple input
lowerCAmelCase_ : int = "This is a simple input"
lowerCAmelCase_ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : str = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token="<pad>" )
# Simple input
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowerCAmelCase_ : Any = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCAmelCase_ : Dict = tokenizer.pad_token_id
lowerCAmelCase_ : Union[str, Any] = tokenizer(lowerCAmelCase__ ,padding="max_length" ,max_length=30 ,return_tensors="np" )
lowerCAmelCase_ : Tuple = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
lowerCAmelCase_ : Any = tokenizer(*lowerCAmelCase__ ,padding="max_length" ,max_length=60 ,return_tensors="np" )
lowerCAmelCase_ : Optional[int] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = "$$$"
lowerCAmelCase_ : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowerCAmelCase__ ,add_bos_token=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : int = tokenizer.bos_token_id
lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ )
self.assertEqual(out_s.input_ids[0] ,lowerCAmelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase_ : List[str] = tokenizer.decode(out_s.input_ids )
lowerCAmelCase_ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowerCAmelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowerCAmelCase_ : str = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
lowerCAmelCase_ : int = "\nif len_a > len_b: result = a\nelse: result = b"
lowerCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.decode(lowerCAmelCase__ ,truncate_before_pattern=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
| 683 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : List[str]=7 ,lowerCAmelCase__ : Optional[Any]=3 ,lowerCAmelCase__ : List[Any]=18 ,lowerCAmelCase__ : int=30 ,lowerCAmelCase__ : Union[str, Any]=4_00 ,lowerCAmelCase__ : Any=True ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : Union[str, Any]=True ,lowerCAmelCase__ : List[str]=None ,):
'''simple docstring'''
lowerCAmelCase_ : int = size if size is not None else {"shortest_edge": 20}
lowerCAmelCase_ : Any = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCAmelCase_ : List[str] = parent
lowerCAmelCase_ : Dict = batch_size
lowerCAmelCase_ : Optional[Any] = num_channels
lowerCAmelCase_ : Optional[Any] = image_size
lowerCAmelCase_ : Union[str, Any] = min_resolution
lowerCAmelCase_ : Any = max_resolution
lowerCAmelCase_ : Union[str, Any] = do_resize
lowerCAmelCase_ : int = size
lowerCAmelCase_ : Any = do_center_crop
lowerCAmelCase_ : int = crop_size
def UpperCAmelCase_ ( self : Dict ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __snake_case ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MobileNetVaImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : Any ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = MobileNetVaImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self : str ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : str ):
'''simple docstring'''
lowerCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A ,"do_resize" ) )
self.assertTrue(hasattr(__A ,"size" ) )
self.assertTrue(hasattr(__A ,"do_center_crop" ) )
self.assertTrue(hasattr(__A ,"crop_size" ) )
def UpperCAmelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
lowerCAmelCase_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
def UpperCAmelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Any ):
'''simple docstring'''
lowerCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
lowerCAmelCase_ : Dict = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
lowerCAmelCase_ : Union[str, Any] = image_processing(__A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCAmelCase_ ( self : int ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
lowerCAmelCase_ : Dict = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
lowerCAmelCase_ : int = image_processing(__A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCAmelCase_ ( self : str ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
lowerCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
lowerCAmelCase_ : List[Any] = image_processing(__A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
| 714 |
from __future__ import annotations
from random import random
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : int | None = None ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Any = random()
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
def __repr__( self : Any ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} ,indent=1 )
def __str__( self : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = str(self.value ) + " "
lowerCAmelCase_ : List[Any] = str(self.left or "" )
lowerCAmelCase_ : Union[str, Any] = str(self.right or "" )
return value + left + right
def UpperCamelCase ( snake_case__ , snake_case__):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowerCAmelCase_ , lowerCAmelCase_ : Any = split(root.left , snake_case__)
return left, root
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = split(root.right , snake_case__)
return root, right
def UpperCamelCase ( snake_case__ , snake_case__):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowerCAmelCase_ : Dict = merge(left.right , snake_case__)
return left
else:
lowerCAmelCase_ : List[str] = merge(snake_case__ , right.left)
return right
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : List[Any] = Node(snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = split(snake_case__ , snake_case__)
return merge(merge(snake_case__ , snake_case__) , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = split(snake_case__ , value - 1)
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = split(snake_case__ , snake_case__)
return merge(snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__):
if not root: # None
return
else:
inorder(root.left)
print(root.value , end=",")
inorder(root.right)
def UpperCamelCase ( snake_case__ , snake_case__):
for arg in args.split():
if arg[0] == "+":
lowerCAmelCase_ : List[str] = insert(snake_case__ , int(arg[1:]))
elif arg[0] == "-":
lowerCAmelCase_ : Optional[int] = erase(snake_case__ , int(arg[1:]))
else:
print("Unknown command")
return root
def UpperCamelCase ( ):
lowerCAmelCase_ : str = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. ")
lowerCAmelCase_ : str = input()
while args != "q":
lowerCAmelCase_ : int = interact_treap(snake_case__ , snake_case__)
print(snake_case__)
lowerCAmelCase_ : str = input()
print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def UpperCamelCase ( snake_case__):
return {key.lstrip("-"): value for key, value in zip(unknown_args[::2] , unknown_args[1::2])}
def UpperCamelCase ( ):
lowerCAmelCase_ : Union[str, Any] = ArgumentParser(
"HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=UpperCamelCase__)
lowerCAmelCase_ : Optional[int] = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(UpperCamelCase__)
EnvironmentCommand.register_subcommand(UpperCamelCase__)
TestCommand.register_subcommand(UpperCamelCase__)
RunBeamCommand.register_subcommand(UpperCamelCase__)
DummyDataCommand.register_subcommand(UpperCamelCase__)
# Parse args
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = parser.parse_known_args()
if not hasattr(UpperCamelCase__ , "func"):
parser.print_help()
exit(1)
lowerCAmelCase_ : Union[str, Any] = parse_unknown_args(UpperCamelCase__)
# Run
lowerCAmelCase_ : List[str] = args.func(UpperCamelCase__ , **UpperCamelCase__)
service.run()
if __name__ == "__main__":
main()
| 715 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
_lowercase = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowercase = {f"funnel-transformer/{name}": 512 for name in _model_names}
_lowercase = {f"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = 2
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : Optional[int]=None ,lowerCAmelCase__ : Optional[Any]=True ,lowerCAmelCase__ : List[str]="<unk>" ,lowerCAmelCase__ : int="<sep>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : List[str]="<cls>" ,lowerCAmelCase__ : Optional[int]="<mask>" ,lowerCAmelCase__ : Union[str, Any]="<s>" ,lowerCAmelCase__ : List[str]="</s>" ,lowerCAmelCase__ : Optional[int]=True ,lowerCAmelCase__ : Tuple=True ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : List[Any]="##" ,**lowerCAmelCase__ : int ,) -> List[Any]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ ,tokenizer_file=lowerCAmelCase__ ,do_lower_case=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,clean_text=lowerCAmelCase__ ,tokenize_chinese_chars=lowerCAmelCase__ ,strip_accents=lowerCAmelCase__ ,wordpieces_prefix=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" ,lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" ,lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" ,lowerCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Optional[int] = getattr(lowerCAmelCase__ ,normalizer_state.pop("type" ) )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : List[str] = strip_accents
lowerCAmelCase_ : Any = tokenize_chinese_chars
lowerCAmelCase_ : List[Any] = normalizer_class(**lowerCAmelCase__ )
lowerCAmelCase_ : int = do_lower_case
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ,lowerCAmelCase__ : str=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowerCAmelCase_ : str = self._tokenizer.model.save(lowerCAmelCase__ ,name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
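# Editor's note: a minimal illustration of the token_type_ids layout built by
# the method above -- Funnel gives the [CLS] slot its own type id (2 here),
# unlike BERT's 0. The token ids below are made up for the example.
_cls, _sep = [101], [102]            # assumed special-token ids
_ids_a, _ids_b = [7, 8], [9]
_type_ids = len(_cls) * [2] + len(_ids_a + _sep) * [0] + len(_ids_b + _sep) * [1]
assert _type_ids == [2, 0, 0, 0, 1, 1]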
| 683 | 0 |
from collections.abc import Iterable
from typing import Any
class __snake_case :
"""simple docstring"""
def __init__( self : int ,lowerCAmelCase__ : int | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : int = value
lowerCAmelCase_ : Dict = None # Added in order to delete a node easier
lowerCAmelCase_ : Dict = None
lowerCAmelCase_ : Tuple = None
def __repr__( self : str ) -> Any:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} ,indent=1 )
class __snake_case :
"""simple docstring"""
def __init__( self : List[str] ,lowerCAmelCase__ : Node | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : str = root
def __str__( self : Union[str, Any] ) -> str:
'''simple docstring'''
return str(self.root )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Node ,lowerCAmelCase__ : Node | None ) -> Optional[Any]:
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCAmelCase_ : List[str] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__SCREAMING_SNAKE_CASE ): # If it is the right children
lowerCAmelCase_ : Union[str, Any] = new_children
else:
lowerCAmelCase_ : Optional[int] = new_children
else:
lowerCAmelCase_ : Tuple = new_children
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Node ) -> List[str]:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase_ ( self : List[Any] ) -> str:
'''simple docstring'''
return self.root is None
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : int ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = Node(__SCREAMING_SNAKE_CASE ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase_ : str = new_node # set its root
else: # Tree is not empty
lowerCAmelCase_ : Optional[Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase_ : str = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase_ : int = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase_ : Optional[int] = new_node
break
else:
lowerCAmelCase_ : str = parent_node.right
lowerCAmelCase_ : Optional[Any] = parent_node
def UpperCAmelCase_ ( self : List[str] ,*lowerCAmelCase__ : str ) -> int:
'''simple docstring'''
for value in values:
self.__insert(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Union[str, Any] ) -> int:
'''simple docstring'''
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
lowerCAmelCase_ : List[str] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase_ : Dict = node.left if value < node.value else node.right
return node
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Node | None = None ) -> Tuple:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCAmelCase_ : int = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase_ : str = node.right
return node
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Node | None = None ) -> List[Any]:
'''simple docstring'''
if node is None:
lowerCAmelCase_ : Any = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase_ : Dict = self.root
while node.left is not None:
lowerCAmelCase_ : Dict = node.left
return node
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : int ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Any = self.search(__SCREAMING_SNAKE_CASE ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
elif node.left is None: # Has only right children
self.__reassign_nodes(__SCREAMING_SNAKE_CASE ,node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__SCREAMING_SNAKE_CASE ,node.left )
else:
lowerCAmelCase_ : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase_ : Dict = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : Node | None ) -> Any:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Dict=None ) -> List[str]:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : list ,lowerCAmelCase__ : Node | None ) -> Any:
'''simple docstring'''
if node:
self.inorder(__SCREAMING_SNAKE_CASE ,node.left )
arr.append(node.value )
self.inorder(__SCREAMING_SNAKE_CASE ,node.right )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Node ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = []
self.inorder(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) # append all values to list using inorder traversal
return arr[k - 1]
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Any = []
if curr_node is not None:
lowerCAmelCase_ : str = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
return node_list
def UpperCamelCase ( ):
lowerCAmelCase_ : int = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase_ : List[Any] = BinarySearchTree()
for i in testlist:
t.insert(_UpperCAmelCase)
# Prints all the elements of the list in order traversal
print(_UpperCAmelCase)
if t.search(6) is not None:
print("The value 6 exists")
else:
print("The value 6 doesn\'t exist")
if t.search(-1) is not None:
print("The value -1 exists")
else:
print("The value -1 doesn\'t exist")
if not t.empty():
print("Max Value: " , t.get_max().value) # type: ignore
print("Min Value: " , t.get_min().value) # type: ignore
for i in testlist:
t.remove(_UpperCAmelCase)
print(_UpperCAmelCase)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
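# Editor's note: the defining invariant of the tree above is that an inorder
# traversal yields the stored values in sorted order. A standalone sketch with
# plain dict nodes (independent of the class above, names illustrative):
def _demo_inorder(node, out):
    if node is not None:
        _demo_inorder(node["left"], out)
        out.append(node["value"])
        _demo_inorder(node["right"], out)

_root = {"value": 8,
         "left": {"value": 3, "left": None, "right": None},
         "right": {"value": 10, "left": None, "right": None}}
_sorted_values = []
_demo_inorder(_root, _sorted_values)
assert _sorted_values == [3, 8, 10]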
| 716 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowercase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCamelCase ( snake_case__):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested")
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested")
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested")
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment")
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate")
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule")
def UpperCamelCase ( snake_case__):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__)
def UpperCamelCase ( snake_case__):
from transformers.testing_utils import pytest_terminal_summary_main
lowerCAmelCase_ : int = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
lowerCAmelCase_ : List[Any] = 0
# Doctest custom flag to ignore output.
_lowercase = doctest.register_optionflag('''IGNORE_RESULT''')
_lowercase = doctest.OutputChecker
class __snake_case ( snake_case__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Any:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
_lowercase = CustomOutputChecker
_lowercase = HfDoctestModule
_lowercase = HfDocTestParser
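# Editor's note: a hedged sketch of how the custom IGNORE_RESULT flag
# registered above is used inside a doctest -- the checker subclass then
# accepts any output for that example. The called function is a placeholder:
#
#     >>> some_call_with_noisy_output()   # doctest: +IGNORE_RESULT
#     <this expected output is never compared>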
| 683 | 0 |
from __future__ import annotations
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Any = list(range(len(snake_case__)))
lowerCAmelCase_ : Optional[int] = [v / w for v, w in zip(snake_case__ , snake_case__)]
index.sort(key=lambda snake_case__: ratio[i] , reverse=snake_case__)
lowerCAmelCase_ : float = 0
lowerCAmelCase_ : list[float] = [0] * len(snake_case__)
for i in index:
if weight[i] <= capacity:
lowerCAmelCase_ : Union[str, Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
lowerCAmelCase_ : Optional[Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
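# Editor's note: a self-contained, cleanly named sketch of the greedy routine
# above (sort by value/weight ratio, take whole items, split the first item
# that no longer fits). The demo name and numbers are illustrative.
def _demo_fractional_knapsack(value, weight, capacity):
    index = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    max_value, fractions = 0.0, [0.0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * fractions[i]
            break
    return max_value, fractions

# ratios are 6.0, 5.0 and 3.0, so items 0 and 1 are taken whole and half of item 2 fits
assert _demo_fractional_knapsack([60, 100, 120], [10, 20, 40], 50) == (220.0, [1.0, 1.0, 0.5])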
| 717 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = list(snake_case__)
lowerCAmelCase_ : Tuple = list(snake_case__)
lowerCAmelCase_ : List[str] = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count += 1
lowerCAmelCase_ : Dict = "_"
if count > 1:
return False
else:
return "".join(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = []
while True:
lowerCAmelCase_ : Tuple = ["$"] * len(snake_case__)
lowerCAmelCase_ : Tuple = []
for i in range(len(snake_case__)):
for j in range(i + 1 , len(snake_case__)):
lowerCAmelCase_ : Optional[int] = compare_string(binary[i] , binary[j])
if k is False:
lowerCAmelCase_ : str = "*"
lowerCAmelCase_ : Tuple = "*"
temp.append("X")
for i in range(len(snake_case__)):
if checka[i] == "$":
pi.append(binary[i])
if len(snake_case__) == 0:
return pi
lowerCAmelCase_ : List[Any] = list(set(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = []
for minterm in minterms:
lowerCAmelCase_ : Dict = ""
for _ in range(snake_case__):
lowerCAmelCase_ : Dict = str(minterm % 2) + string
minterm //= 2
temp.append(snake_case__)
return temp
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = list(snake_case__)
lowerCAmelCase_ : Dict = list(snake_case__)
lowerCAmelCase_ : Dict = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = [0] * len(snake_case__)
for i in range(len(chart[0])):
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : int = -1
for j in range(len(snake_case__)):
if chart[j][i] == 1:
count += 1
lowerCAmelCase_ : Optional[int] = j
if count == 1:
lowerCAmelCase_ : Union[str, Any] = 1
for i in range(len(snake_case__)):
if select[i] == 1:
for j in range(len(chart[0])):
if chart[i][j] == 1:
for k in range(len(snake_case__)):
lowerCAmelCase_ : Tuple = 0
temp.append(prime_implicants[i])
while True:
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Dict = -1
lowerCAmelCase_ : Tuple = 0
for i in range(len(snake_case__)):
lowerCAmelCase_ : Dict = chart[i].count(1)
if count_n > max_n:
lowerCAmelCase_ : Optional[int] = count_n
lowerCAmelCase_ : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem])
for i in range(len(chart[0])):
if chart[rem][i] == 1:
for j in range(len(snake_case__)):
lowerCAmelCase_ : Any = 0
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : str = [[0 for x in range(len(snake_case__))] for x in range(len(snake_case__))]
for i in range(len(snake_case__)):
lowerCAmelCase_ : Optional[Any] = prime_implicants[i].count("_")
for j in range(len(snake_case__)):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__):
lowerCAmelCase_ : Dict = 1
return chart
def UpperCamelCase ( ):
    lowerCAmelCase_ : Optional[Any] = int(input("Enter the number of variables\n"))
lowerCAmelCase_ : Tuple = [
float(snake_case__)
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
]
lowerCAmelCase_ : Any = decimal_to_binary(snake_case__ , snake_case__)
lowerCAmelCase_ : Dict = check(snake_case__)
print("Prime Implicants are:")
print(snake_case__)
lowerCAmelCase_ : int = prime_implicant_chart(snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = selection(snake_case__ , snake_case__)
print("Essential Prime Implicants are:")
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
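# Editor's note: hedged, self-contained restatements of two helper steps above,
# with plain names. `decimal_to_binary` widens each minterm to the variable
# count; `compare_string` merges strings that differ in exactly one position.
def _demo_decimal_to_binary(no_of_variables, minterms):
    return [format(int(m), f"0{no_of_variables}b") for m in minterms]

def _demo_compare_string(a, b):
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    return a[: diff[0]] + "_" + a[diff[0] + 1 :] if len(diff) == 1 else False

assert _demo_decimal_to_binary(3, [1, 5]) == ["001", "101"]
assert _demo_compare_string("010", "110") == "_10"     # one mismatch -> merged
assert _demo_compare_string("010", "101") is False     # too many mismatches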
| 683 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = 1 , snake_case__ = 1 , snake_case__ = 1.0e4 , snake_case__ = False , snake_case__ = 1.0 , ):
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F'''Embedding dimension {embedding_dim} should be even'''
lowerCAmelCase_ : List[str] = float(embedding_dim // 2)
lowerCAmelCase_ : int = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
lowerCAmelCase_ : str = min_timescale * jnp.exp(jnp.arange(_lowercase , dtype=jnp.floataa) * -log_timescale_increment)
lowerCAmelCase_ : Optional[int] = jnp.expand_dims(_lowercase , 1) * jnp.expand_dims(_lowercase , 0)
# scale embeddings
lowerCAmelCase_ : str = scale * emb
if flip_sin_to_cos:
lowerCAmelCase_ : Optional[int] = jnp.concatenate([jnp.cos(_lowercase), jnp.sin(_lowercase)] , axis=1)
else:
lowerCAmelCase_ : str = jnp.concatenate([jnp.sin(_lowercase), jnp.cos(_lowercase)] , axis=1)
lowerCAmelCase_ : Any = jnp.reshape(_lowercase , [jnp.shape(_lowercase)[0], embedding_dim])
return signal
class __snake_case ( nn.Module ):
"""simple docstring"""
UpperCamelCase_ = 3_2
UpperCamelCase_ = jnp.floataa
@nn.compact
def __call__( self : List[Any] ,lowerCAmelCase__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = nn.Dense(self.time_embed_dim ,dtype=self.dtype ,name="linear_1" )(__A )
lowerCAmelCase_ : Dict = nn.silu(__A )
lowerCAmelCase_ : Union[str, Any] = nn.Dense(self.time_embed_dim ,dtype=self.dtype ,name="linear_2" )(__A )
return temb
class __snake_case ( nn.Module ):
"""simple docstring"""
UpperCamelCase_ = 3_2
UpperCamelCase_ = False
UpperCamelCase_ = 1
@nn.compact
def __call__( self : Union[str, Any] ,lowerCAmelCase__ : List[str] ) -> Any:
'''simple docstring'''
return get_sinusoidal_embeddings(
__A ,embedding_dim=self.dim ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.freq_shift )
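# Editor's note: a hedged shape check for the helper above, using the
# `get_sinusoidal_embeddings` name referenced in this module -- each timestep
# maps to an `embedding_dim`-wide vector of concatenated sin/cos features:
#
#     timesteps = jnp.arange(4)
#     emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
#     assert emb.shape == (4, 32)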
| 718 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
lowerCAmelCase_ : List[Any] = bnb_quantization_config.load_in_abit
lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed.")
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
if isinstance(snake_case__ , snake_case__) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
lowerCAmelCase_ : Optional[int] = load_in_abit
lowerCAmelCase_ : List[str] = load_in_abit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(snake_case__):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(snake_case__ , snake_case__):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
if isinstance(snake_case__ , snake_case__):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ):
lowerCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase_ : Optional[int] = []
current_key_name.append(snake_case__)
if isinstance(snake_case__ , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase_ : Optional[int] = ".".join(snake_case__)
lowerCAmelCase_ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase_ : List[Any] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Tuple = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False")
lowerCAmelCase_ : List[str] = module.weight.data
if module.bias is not None:
lowerCAmelCase_ : Any = module.bias.data
bnb_module.requires_grad_(snake_case__)
setattr(snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = True
if len(list(module.children())) > 0:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
def UpperCamelCase ( snake_case__):
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase_ : List[Any] = deepcopy(snake_case__) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase_ : Dict = find_tied_parameters(snake_case__)
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
lowerCAmelCase_ : Optional[Any] = sum(snake_case__ , [])
lowerCAmelCase_ : List[Any] = len(snake_case__) > 0
# Check if it is a base model
lowerCAmelCase_ : List[str] = False
if hasattr(snake_case__ , "base_model_prefix"):
lowerCAmelCase_ : Tuple = not hasattr(snake_case__ , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase_ : Union[str, Any] = list(model.named_children())
lowerCAmelCase_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase_ : Any = set(snake_case__) - set(snake_case__)
lowerCAmelCase_ : Tuple = list(set(snake_case__)) + list(snake_case__)
# remove ".weight" from the keys
lowerCAmelCase_ : List[str] = [".weight", ".bias"]
lowerCAmelCase_ : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase_ : str = name.replace(snake_case__ , "")
filtered_module_names.append(snake_case__)
return filtered_module_names
def UpperCamelCase ( snake_case__):
for m in model.modules():
if isinstance(snake_case__ , bnb.nn.Linearabit):
return True
return False
def UpperCamelCase ( snake_case__):
return next(parameter.parameters()).device
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(snake_case__ , snake_case__ , 0 , dtype=snake_case__ , value=snake_case__)
lowerCAmelCase_ : str = param_name
lowerCAmelCase_ : Tuple = model
if "." in tensor_name:
lowerCAmelCase_ : Dict = tensor_name.split(".")
for split in splits[:-1]:
lowerCAmelCase_ : Any = getattr(snake_case__ , snake_case__)
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''')
lowerCAmelCase_ : Union[str, Any] = new_module
lowerCAmelCase_ : Any = splits[-1]
# offload weights
lowerCAmelCase_ : List[Any] = False
offload_weight(module._parameters[tensor_name] , snake_case__ , snake_case__ , index=snake_case__)
if hasattr(module._parameters[tensor_name] , "SCB"):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__ , )
else:
offload_weight(snake_case__ , snake_case__ , snake_case__ , index=snake_case__)
offload_weight(snake_case__ , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__)
set_module_tensor_to_device(snake_case__ , snake_case__ , "meta" , dtype=snake_case__ , value=torch.empty(*param.size()))
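# Editor's note: a hedged sketch of reaching the helpers above through
# accelerate's public API (names follow the accelerate docs; treat the model
# class and checkpoint path as placeholders):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     with init_empty_weights():
#         empty_model = MyModel()                      # placeholder model class
#     model = load_and_quantize_model(
#         empty_model,
#         bnb_quantization_config=bnb_config,
#         weights_location="path/to/checkpoint",       # placeholder path
#         device_map="auto",
#     )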
| 683 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowercase = logging.get_logger(__name__)
_lowercase = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
_lowercase = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
_lowercase = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
_lowercase = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
_lowercase = OrderedDict(
[
        # Model for Image Classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
_lowercase = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
_lowercase = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
_lowercase = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
_lowercase = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
_lowercase = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
_lowercase = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
_lowercase = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
_lowercase = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
_lowercase = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
_lowercase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowercase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowercase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowercase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowercase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowercase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowercase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowercase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowercase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowercase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowercase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowercase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowercase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowercase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_MAPPING
_lowercase = auto_class_update(FlaxAutoModel)
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowercase = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowercase = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowercase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowercase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowercase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowercase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowercase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowercase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowercase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowercase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowercase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class __snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCamelCase_ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowercase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
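# Editor's note: a short usage sketch for the lazy auto classes assembled
# above (standard transformers API; the checkpoint name is just an example):
#
#     from transformers import FlaxAutoModel
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")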
| 719 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = ['input_features', 'is_longer']
def __init__( self : Optional[int] ,lowerCAmelCase__ : List[Any]=64 ,lowerCAmelCase__ : Any=4_80_00 ,lowerCAmelCase__ : Optional[Any]=4_80 ,lowerCAmelCase__ : List[str]=10 ,lowerCAmelCase__ : List[Any]=10_24 ,lowerCAmelCase__ : Union[str, Any]=0.0 ,lowerCAmelCase__ : Tuple=False ,lowerCAmelCase__ : float = 0 ,lowerCAmelCase__ : float = 1_40_00 ,lowerCAmelCase__ : int = None ,lowerCAmelCase__ : str = "fusion" ,lowerCAmelCase__ : str = "repeatpad" ,**lowerCAmelCase__ : Union[str, Any] ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Optional[Any] = top_db
lowerCAmelCase_ : str = truncation
lowerCAmelCase_ : Tuple = padding
lowerCAmelCase_ : str = fft_window_size
lowerCAmelCase_ : Dict = (fft_window_size >> 1) + 1
lowerCAmelCase_ : Dict = hop_length
lowerCAmelCase_ : Any = max_length_s
lowerCAmelCase_ : int = max_length_s * sampling_rate
lowerCAmelCase_ : Optional[int] = sampling_rate
lowerCAmelCase_ : int = frequency_min
lowerCAmelCase_ : Optional[Any] = frequency_max
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm=lowerCAmelCase__ ,mel_scale="htk" ,)
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm="slaney" ,mel_scale="slaney" ,)
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = spectrogram(
lowerCAmelCase__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCAmelCase__ ,log_mel="dB" ,)
return log_mel_spectrogram.T
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
# randomly choose index for each part
lowerCAmelCase_ : str = np.random.choice(ranges[0] )
lowerCAmelCase_ : Optional[Any] = np.random.choice(ranges[1] )
lowerCAmelCase_ : Any = np.random.choice(ranges[2] )
lowerCAmelCase_ : str = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase_ : List[str] = torch.tensor(mel[None, None, :] )
lowerCAmelCase_ : List[Any] = torch.nn.functional.interpolate(
lowerCAmelCase__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy()
lowerCAmelCase_ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase_ : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase_ : str = len(lowerCAmelCase__ ) - max_length
lowerCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 )
lowerCAmelCase_ : Dict = waveform[idx : idx + max_length]
lowerCAmelCase_ : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
            lowerCAmelCase_ : str = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
lowerCAmelCase_ : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase_ : Dict = np.stack([mel, mel, mel, mel] ,axis=0 )
lowerCAmelCase_ : int = False
else:
lowerCAmelCase_ : str = self._random_mel_fusion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase_ : Dict = False
        # only use "repeat" as a new possible value for padding: the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase_ : List[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : int = np.stack(np.tile(lowerCAmelCase__ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase_ : Optional[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = np.stack(np.tile(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = np.pad(lowerCAmelCase__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 )
if truncation == "fusion":
lowerCAmelCase_ : int = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
lowerCAmelCase_ : str = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : str = None ,lowerCAmelCase__ : Optional[str] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCAmelCase__ : List[Any] ,) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ : List[str] = truncation if truncation is not None else self.truncation
lowerCAmelCase_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : Dict = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : Dict = is_batched_numpy or (
isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : List[str] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCAmelCase_ : Tuple = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : Any = [np.asarray(lowerCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase_ : Optional[Any] = [
self._get_input_mel(lowerCAmelCase__ ,max_length if max_length else self.nb_max_samples ,lowerCAmelCase__ ,lowerCAmelCase__ )
for waveform in raw_speech
]
lowerCAmelCase_ : str = []
lowerCAmelCase_ : str = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase__ )
is_longer.append(lowerCAmelCase__ )
if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase_ : Any = np.random.randint(0 ,len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Dict = True
if isinstance(input_mel[0] ,lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase_ : List[Any] = [[longer] for longer in is_longer]
lowerCAmelCase_ : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
lowerCAmelCase_ : Dict = BatchFeature(lowerCAmelCase__ )
if return_tensors is not None:
lowerCAmelCase_ : List[str] = input_features.convert_to_tensors(lowerCAmelCase__ )
return input_features
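# Editor's note: a hedged usage sketch for the feature extractor above. CLAP
# expects 48 kHz mono audio, and in the default "fusion" mode each clip comes
# back as a 4-channel stack of 64-bin log-mel spectrograms. Values illustrative:
#
#     import numpy as np
#     fe = ClapFeatureExtractor()                          # name as in transformers
#     audio = np.random.randn(48_000).astype(np.float32)   # 1 s of fake audio
#     out = fe(audio, sampling_rate=48_000, return_tensors="np")
#     out["input_features"].shape   # (1, 4, 1001, 64) with the default 10 s window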
| 683 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
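# Illustrative aside (not part of the test suite): `get_activation` maps a
# string name to an `nn.Module`, so it can be dropped directly into model code.
#
#   act = get_activation("gelu")
#   act(torch.zeros(2, 3))  # all zeros, since GELU(0) == 0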
| 720 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_lowercase = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
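# Reference sketch (added for comparison, not part of the original module):
# a single-process odd-even transposition sort, handy for checking the
# parallel version's output on small inputs.
def odd_even_transposition_sequential(arr: list) -> list:
    arr = list(arr)
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr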
| 683 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
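# Illustrative usage (a sketch, assuming `transformers` is installed):
#
#   config = LxmertConfig()
#   config.num_hidden_layers  # {"vision": 5, "cross_encoder": 5, "language": 9}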
| 721 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
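# Illustrative example (an assumption, not part of the original module): the
# classic two-state weather HMM often used to demonstrate Viterbi decoding.
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # Expected most likely state sequence: ['Healthy', 'Healthy', 'Fever']
    print(viterbi(observations, states, start_p, trans_p, emit_p))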
| 683 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
| 700 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
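# Hedged usage sketch (follows the `transformers` agent-tool conventions;
# exact entry points may vary across library versions):
#
#   tool = TextToSpeechTool()
#   tool.setup()
#   audio = tool("Hello, world!")  # 1-D waveform tensor at 16 kHz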
| 683 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
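# Illustrative usage (a sketch, assuming `transformers` is installed):
#
#   from transformers import SwinConfig, DetrConfig
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(
#       backbone_config=SwinConfig(), decoder_config=DetrConfig()
#   )
#   config.to_dict()["model_type"]  # "maskformer"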
| 701 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
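# Worked example (illustrative, not part of the official script):
#   gold = "the cat sat"  -> normalized tokens ["cat", "sat"]  ("the" is an article)
#   pred = "cat sat down" -> normalized tokens ["cat", "sat", "down"]
#   overlap = 2, precision = 2/3, recall = 2/2
#   F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8
# i.e. compute_f1("the cat sat", "cat sat down") == 0.8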
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
_lowercase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 683 | 0 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Cleans the table of content of the model documentation by removing duplicates and sorting models
    alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modality sections and clean them one by one
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
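# Illustrative input shape (an assumption about the _toctree.yml layout):
# each modality section carries a list such as
#   [{"local": "model_doc/bert", "title": "BERT"},
#    {"local": "model_doc/albert", "title": "ALBERT"}]
# and clean_model_doc_toc() deduplicates entries on "local" and sorts them
# by lower-cased "title".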
| 702 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
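# Sanity check (illustrative): 220 and 284 form the smallest amicable pair,
# since sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220,
# so solution(300) == 220 + 284 == 504.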
| 683 | 0 |
# Imports
import numpy as np
class IndexCalculation:
    """
    Calculates vegetation indices (e.g. NDVI) from the spectral bands
    red, green, blue, red edge and nir, given as numpy matrices.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
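# Minimal usage sketch (illustrative values, assumes numpy arrays):
#
#   red = np.array([[50.0]])
#   nir = np.array([[100.0]])
#   cl = IndexCalculation(red=red, nir=nir)
#   cl.calculation("NDVI")  # array([[0.33333333]]), i.e. (100 - 50) / (100 + 50)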
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 683 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Optional[int]=None ,lowerCAmelCase__ : Tuple=None ,lowerCAmelCase__ : Dict=None ,lowerCAmelCase__ : Optional[Any] = 5 ,lowerCAmelCase__ : Union[str, Any] = 67 ,lowerCAmelCase__ : Optional[int] = 1.0 ,lowerCAmelCase__ : List[Any] = None ,) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = eos_token_id
lowerCAmelCase_ : Optional[Any] = None
lowerCAmelCase_ : Dict = None
lowerCAmelCase_ : Union[str, Any] = torch.ones(lowerCAmelCase__ ,device=lowerCAmelCase__ ,dtype=torch.int )
lowerCAmelCase_ : int = torch.zeros(lowerCAmelCase__ ,device=lowerCAmelCase__ ,dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ : Optional[Any] = input_embeds
else:
lowerCAmelCase_ : Dict = self.transformer.transformer.wte(lowerCAmelCase__ )
for i in range(lowerCAmelCase__ ):
lowerCAmelCase_ : Dict = self.transformer(inputs_embeds=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = outputs.logits
lowerCAmelCase_ : List[Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ : List[str] = logits.softmax(-1 ).log()
if scores is None:
                lowerCAmelCase_ , lowerCAmelCase_ : int = logits.topk(lowerCAmelCase__ ,-1 )
lowerCAmelCase_ : Any = generated.expand(lowerCAmelCase__ ,*generated.shape[1:] )
                lowerCAmelCase_ , lowerCAmelCase_ : Tuple = next_tokens.permute(1 ,0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ : Tuple = next_tokens
else:
lowerCAmelCase_ : int = tokens.expand(lowerCAmelCase__ ,*tokens.shape[1:] )
lowerCAmelCase_ : List[Any] = torch.cat((tokens, next_tokens) ,dim=1 )
else:
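                # Later steps: penalize stopped beams, extend live ones, rank every candidate
                # by average log-probability, and keep the top `beam_size` of them.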
lowerCAmelCase_ : Tuple = -float(np.inf )
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : Any = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ : Union[str, Any] = scores_sum / seq_lengths[:, None]
                lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = scores_sum_average.view(-1 ).topk(lowerCAmelCase__ ,-1 )
lowerCAmelCase_ : Optional[Any] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ : str = seq_lengths[next_tokens_source]
lowerCAmelCase_ : List[str] = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ : Dict = next_tokens.unsqueeze(1 )
lowerCAmelCase_ : Dict = tokens[next_tokens_source]
lowerCAmelCase_ : int = torch.cat((tokens, next_tokens) ,dim=1 )
lowerCAmelCase_ : Dict = generated[next_tokens_source]
lowerCAmelCase_ : Dict = scores_sum_average * seq_lengths
lowerCAmelCase_ : str = is_stopped[next_tokens_source]
lowerCAmelCase_ : Optional[Any] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 )
lowerCAmelCase_ : Dict = torch.cat((generated, next_token_embed) ,dim=1 )
lowerCAmelCase_ : Optional[int] = is_stopped + next_tokens.eq(lowerCAmelCase__ ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ : List[str] = scores / seq_lengths
lowerCAmelCase_ : List[str] = scores.argsort(descending=lowerCAmelCase__ )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase_ : Union[str, Any] = [tokens[i] for i in order]
lowerCAmelCase_ : Union[str, Any] = torch.stack(lowerCAmelCase__ ,dim=0 )
lowerCAmelCase_ : Any = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype )
return output_texts, seq_lengths
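# A standalone sketch of the length-normalized beam scoring used in `generate_beam`
# above: cumulative log-probabilities are divided by sequence length so longer beams
# are not penalized merely for emitting more tokens. Toy numbers, not model output.
if __name__ == "__main__":
    import torch

    cumulative_logprobs = torch.tensor([-2.0, -3.0, -3.5])  # one entry per beam
    seq_lengths = torch.tensor([2.0, 4.0, 5.0])
    average = cumulative_logprobs / seq_lengths  # tensor([-1.0000, -0.7500, -0.7000])
    order = average.argsort(descending=True)  # best beam first -> tensor([2, 1, 0])
    print(average, order)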
| 704 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
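# For reference, the two helpers above behave as follows (illustrative values):
#   get_pairs(tuple("hello"))  -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
#   len(bytes_to_unicode())    -> 256  (every byte maps to a printable unicode character)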
class LongformerTokenizer(PreTrainedTokenizer):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : str ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any]="replace" ,lowerCAmelCase__ : Dict="<s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : Optional[Any]="<s>" ,lowerCAmelCase__ : List[Any]="<unk>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : int="<mask>" ,lowerCAmelCase__ : Any=False ,**lowerCAmelCase__ : int ,) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : Dict = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[Any] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : List[Any] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[Any] = bytes_to_unicode()
lowerCAmelCase_ : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Dict = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Optional[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Dict = []
for token in re.findall(self.pat ,lowerCAmelCase__ ):
lowerCAmelCase_ : List[str] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[Any] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowerCAmelCase_ : Tuple = 0
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowerCAmelCase_ : Optional[Any] = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowerCAmelCase_ : Union[str, Any] = " " + text
return (text, kwargs)
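# Special-token layout produced by `build_inputs_with_special_tokens` above (the same
# scheme as RoBERTa, from which this tokenizer is copied):
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s></s> B </s>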
| 683 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
"""simple docstring"""
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
def UpperCAmelCase_ ( self : Tuple ,**lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : str = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**_A )
return config
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str=0 ,**lowerCAmelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Any = dict(self.forward_default_kwargs )
lowerCAmelCase_ : Any = kwargs.pop("num_inference_steps" ,_A )
lowerCAmelCase_ : Any = self.dummy_sample
lowerCAmelCase_ : Optional[Any] = 0.1 * sample
lowerCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : Tuple = self.get_scheduler_config(**_A )
lowerCAmelCase_ : Any = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
lowerCAmelCase_ : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
lowerCAmelCase_ : str = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
lowerCAmelCase_ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase_ , lowerCAmelCase_ : Any = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase_ : Union[str, Any] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
lowerCAmelCase_ : int = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str=0 ,**lowerCAmelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : str = dict(self.forward_default_kwargs )
lowerCAmelCase_ : Tuple = kwargs.pop("num_inference_steps" ,_A )
lowerCAmelCase_ : Optional[int] = self.dummy_sample
lowerCAmelCase_ : Dict = 0.1 * sample
lowerCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : int = self.get_scheduler_config()
lowerCAmelCase_ : str = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase_ : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
lowerCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase_ : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase_ : str = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
lowerCAmelCase_ : List[str] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Any=None ,**lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if scheduler is None:
lowerCAmelCase_ : Any = self.scheduler_classes[0]
lowerCAmelCase_ : str = self.get_scheduler_config(**_A )
lowerCAmelCase_ : str = scheduler_class(**_A )
lowerCAmelCase_ : Tuple = self.scheduler_classes[0]
lowerCAmelCase_ : int = self.get_scheduler_config(**_A )
lowerCAmelCase_ : Tuple = scheduler_class(**_A )
lowerCAmelCase_ : int = 10
lowerCAmelCase_ : Tuple = self.dummy_model()
lowerCAmelCase_ : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : Any = model(_A ,_A )
lowerCAmelCase_ : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def UpperCAmelCase_ ( self : str ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = dict(self.forward_default_kwargs )
lowerCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" ,_A )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : str = self.get_scheduler_config()
lowerCAmelCase_ : Optional[Any] = scheduler_class(**_A )
lowerCAmelCase_ : Optional[Any] = self.dummy_sample
lowerCAmelCase_ : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,"set_timesteps" ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,"set_timesteps" ):
lowerCAmelCase_ : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase_ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
lowerCAmelCase_ : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase_ : Dict = scheduler.timesteps[5]
lowerCAmelCase_ : str = scheduler.timesteps[6]
lowerCAmelCase_ : Optional[Any] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
lowerCAmelCase_ : int = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Any = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase_ : List[Any] = self.full_loop(scheduler=_A )
lowerCAmelCase_ : str = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
lowerCAmelCase_ : Tuple = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase_ : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase_ : Any = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase_ : Any = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase_ : List[Any] = self.full_loop(scheduler=_A )
lowerCAmelCase_ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,algorithm_type="deis" ,solver_order=_A ,solver_type=_A ,)
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,algorithm_type=_A ,)
lowerCAmelCase_ : str = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,algorithm_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = self.full_loop()
lowerCAmelCase_ : str = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
lowerCAmelCase_ : Optional[Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase_ : Optional[int] = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
lowerCAmelCase_ : List[Any] = scheduler_class(**_A )
lowerCAmelCase_ : Union[str, Any] = 10
lowerCAmelCase_ : int = self.dummy_model()
lowerCAmelCase_ : Optional[int] = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : Dict = model(_A ,_A )
lowerCAmelCase_ : int = scheduler.step(_A ,_A ,_A ).prev_sample
        assert sample.dtype == torch.float16
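# A minimal standalone sketch of the sampling loop the tests above exercise; the
# random tensor stands in for a real noise-prediction model, so the output is noise.
if __name__ == "__main__":
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample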
| 705 |
from __future__ import annotations

from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        """Return a string of all the Nodes using in order traversal"""
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        """Insert a new node in Binary Search Tree with value label"""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        """We go deep on the right branch"""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """We go deep on the left branch"""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """
        This function traversal the tree.
        You can pass a function to traversal the tree as needed by client code
        """
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        """Perform an inorder traversal and append values of the nodes to a list named arr"""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """Return the kth smallest element in a binary search tree"""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
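# Example of the kth-smallest query: an inorder traversal of a BST yields values in
# sorted order, so the answer is simply the (k - 1)-th element of that traversal.
#   tree = BinarySearchTree()
#   tree.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
#   tree.find_kth_smallest(3, tree.root)  # -> 4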
| 683 | 0 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
lowerCAmelCase_ : Union[str, Any] = hf_hub_download(lowerCAmelCase__ , F'''checkpoints/{model_name}.pth''')
if "sam_vit_b" in model_name:
lowerCAmelCase_ : List[str] = SamConfig()
elif "sam_vit_l" in model_name:
lowerCAmelCase_ : Dict = SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
lowerCAmelCase_ : List[str] = SamConfig(
vision_config=lowerCAmelCase__ , )
elif "sam_vit_h" in model_name:
lowerCAmelCase_ : List[Any] = SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
lowerCAmelCase_ : int = SamConfig(
vision_config=lowerCAmelCase__ , )
lowerCAmelCase_ : List[Any] = torch.load(lowerCAmelCase__ , map_location="cpu")
lowerCAmelCase_ : Tuple = replace_keys(lowerCAmelCase__)
lowerCAmelCase_ : Union[str, Any] = SamImageProcessor()
lowerCAmelCase_ : Optional[Any] = SamProcessor(image_processor=lowerCAmelCase__)
lowerCAmelCase_ : Tuple = SamModel(lowerCAmelCase__)
hf_model.load_state_dict(lowerCAmelCase__)
lowerCAmelCase_ : Tuple = hf_model.to("cuda")
lowerCAmelCase_ : str = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
lowerCAmelCase_ : Tuple = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__).raw).convert("RGB")
lowerCAmelCase_ : List[Any] = [[[4_00, 6_50]]]
lowerCAmelCase_ : Tuple = [[1]]
lowerCAmelCase_ : List[Any] = processor(images=np.array(lowerCAmelCase__) , return_tensors="pt").to("cuda")
with torch.no_grad():
lowerCAmelCase_ : Any = hf_model(**lowerCAmelCase__)
lowerCAmelCase_ : List[Any] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
lowerCAmelCase_ : Union[str, Any] = processor(
images=np.array(lowerCAmelCase__) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors="pt").to("cuda")
with torch.no_grad():
lowerCAmelCase_ : Any = hf_model(**lowerCAmelCase__)
lowerCAmelCase_ : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
lowerCAmelCase_ : int = ((75, 2_75, 17_25, 8_50),)
lowerCAmelCase_ : Any = processor(images=np.array(lowerCAmelCase__) , input_boxes=lowerCAmelCase__ , return_tensors="pt").to("cuda")
with torch.no_grad():
lowerCAmelCase_ : Union[str, Any] = hf_model(**lowerCAmelCase__)
lowerCAmelCase_ : Tuple = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
lowerCAmelCase_ : List[Any] = [[[4_00, 6_50], [8_00, 6_50]]]
lowerCAmelCase_ : List[str] = [[1, 1]]
lowerCAmelCase_ : List[Any] = processor(
images=np.array(lowerCAmelCase__) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors="pt").to("cuda")
with torch.no_grad():
lowerCAmelCase_ : str = hf_model(**lowerCAmelCase__)
lowerCAmelCase_ : Optional[int] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
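# Example invocation (the script filename is assumed here; pick any entry from
# `choices` above and make sure the matching checkpoint is reachable on the Hub):
#   python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-base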
| 706 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the prefix of the node and a word"""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
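# Illustration of RadixNode.match's three-way split into (common prefix, leftover
# node prefix, leftover word):
#   RadixNode(prefix="banana").match("bandana")  # -> ('ban', 'ana', 'dana')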
| 683 | 0 |