Dataset schema (each row pairs a `code` cell with a `style_context` cell; the `| 686 |` and `| 686 | 1 |` markers between the snippets below are the remaining integer cells of each row):

code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 81 to 54k | int64 0 to 721 | stringlengths 91 to 41.9k | int64 0 to 699 | int64 0 to 1
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 686 |
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

# Load the iris dataset and split it into train/test sets
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Gives the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classifies the point using the KNN algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 686 | 1 |
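# As a sanity check, the hand-rolled KNN above can be compared against
# scikit-learn's own implementation on the same query point (a sketch; the
# printed class depends on the random train/test split):
from sklearn.neighbors import KNeighborsClassifier

sk_knn = KNeighborsClassifier(n_neighbors=5)
sk_knn.fit(X_train, y_train)
pred = sk_knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]
print(classes[pred])  # expected to agree with classifier(...) above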
import argparse
import copy


def generate_neighbours(path):
    """Parse an edge-list file ("node_a node_b distance" per line) into a dict
    mapping each node to a list of [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting (and ending) at the node
    given by the first character of the input file, and return it together
    with its total distance."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Generate the neighbourhood of a solution by swapping every pair of
    interior nodes; each neighbour tour is stored with its total distance
    appended as the last element, and the list is sorted by that distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of length
    `size`, returning the best tour found and its total distance."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 686 |
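# The parsers above expect one edge per line ("node_a node_b distance"), and
# `generate_first_solution` reads the first character of the file as the start
# node, so single-character node names are assumed. A small end-to-end sketch
# (the file name is illustrative):
EXAMPLE = "a b 20\na c 18\na d 22\nb c 30\nb d 26\nc d 24\n"

with open("tabu_example.txt", "w") as f:
    f.write(EXAMPLE)

neighbours = generate_neighbours("tabu_example.txt")
first_solution, distance = generate_first_solution("tabu_example.txt", neighbours)
best_solution, best_cost = tabu_search(first_solution, distance, neighbours, iters=5, size=3)
print(best_solution, best_cost)  # greedy tour a-c-d-b-a costs 88; tabu search can only improve on it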
from typing import List, Union

import numpy as np

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline


logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 686 | 1 |
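# Typical usage of the pipeline above goes through `transformers.pipeline`
# (a sketch; the checkpoint is the usual public NLI model and downloads on
# first use):
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "one day I will see the world",
    candidate_labels=["travel", "cooking", "dancing"],
)
print(result["labels"][0], result["scores"][0])  # highest-scoring label first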
def matching_min_vertex_cover(graph):
    """APX algorithm for minimum vertex cover using a maximal matching."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph):
    """Return a set of (from_node, to_node) couples representing all edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 686 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 686 | 1 |
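# A short usage sketch: the config fully determines an untrained model's
# shape, so a randomly initialized YOLOS detector can be built from it
# directly (assuming the model classes from the same transformers install):
from transformers import YolosConfig, YolosForObjectDetection

config = YolosConfig(num_detection_tokens=50)
model = YolosForObjectDetection(config)
print(model.config.num_detection_tokens)  # 50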
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x ** y) = y * log10(x) to compare the
        # powers through their base-10 logarithms.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 686 |
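# The trick above compares x1**y1 and x2**y2 without computing the powers:
# since log10 is monotonic, comparing y*log10(x) preserves the ordering. A
# quick check with values small enough to compute both ways:
import math

assert (math.log10(2**10) > math.log10(3**6)) == (10 * math.log10(2) > 6 * math.log10(3))
print(2**10, 3**6)  # 1024 729, so 2^10 is larger, matching the log comparison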
from jiwer import compute_measures

import datasets


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
| 686 | 1 |
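# The metric above delegates to jiwer, so the same number can be obtained
# from jiwer directly, which is a handy cross-check when debugging:
from jiwer import wer

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
print(wer(references, predictions))  # 0.5, matching the docstring example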
import argparse
import os
import re

import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator


def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 686 |
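# The script above is meant to be run through the accelerate launcher so the
# same code works on CPU, a single GPU, or several GPUs. A sketch of the usual
# invocation (file and data paths are illustrative; data_dir is expected to
# hold Oxford-IIIT-Pet-style files named "<label>_<number>.jpg"):
#
#   accelerate config                                   # one-time interactive setup
#   accelerate launch cv_example.py --data_dir images/ --checkpointing_steps epoch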
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 686 | 1 |
import os


def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal path sum from the top left to the bottom right of the
    matrix by only moving right and down, using dynamic programming.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])

    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
| 686 |
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 686 | 1 |
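# A minimal standalone sketch of what the mixin exercises for each block type,
# assuming diffusers' usual public signature for DownBlock2D (argument names
# and return shape are assumptions, not taken from the test file above):
import torch
from diffusers.models.unet_2d_blocks import DownBlock2D

block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
hidden_states = torch.randn(4, 32, 32, 32)
temb = torch.randn(4, 128)
sample, output_states = block(hidden_states, temb)
print(sample.shape)  # spatial dims halved by the block's downsampler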
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 686 |
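# The _LazyModule indirection above defers the heavy torch-dependent imports
# until an attribute is first accessed. A minimal sketch of the same idea
# using plain PEP 562 module-level __getattr__ (illustrative, not
# transformers' implementation):
import importlib

def __getattr__(name):
    if name == "CLIPSegModel":
        module = importlib.import_module(".modeling_clipseg", __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")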
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string (target) can be constructed from
    the given list of substrings (word_bank)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
| 686 | 1 |
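# For a quick check of the table-based construction above, a classic example
# whose answer is easy to verify by hand:
combos = all_construct("purple", ["purp", "p", "ur", "le", "purpl"])
assert combos == [["purp", "le"], ["p", "ur", "p", "le"]]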
import unittest

from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A_ : Any = {"""input_ids""": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , ) | 686 |
'''simple docstring'''
def is_balanced( s ):
'''simple docstring'''
    stack = []
    open_brackets = set({"""(""", """[""", """{"""} )
    closed_brackets = set({""")""", """]""", """}"""} )
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
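# A quick sanity check of the stack-based matcher (illustrative inputs, not part
# of the original script):
#   is_balanced("([]{})")  -> True: each opener is popped by its matching closer in LIFO order.
#   is_balanced("([)]")    -> False: ")" arrives while "[" is still on top of the stack.
#   is_balanced("((")      -> False: the stack is non-empty when the input ends.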
def main( ):
    '''simple docstring'''
    s = input("""Enter sequence of brackets: """ )
    if is_balanced(s ):
        print(s , """is balanced""" )
    else:
        print(s , """is not balanced""" )
if __name__ == "__main__":
main() | 686 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DanceDiffusionPipeline
__SCREAMING_SNAKE_CASE : List[Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__SCREAMING_SNAKE_CASE : Optional[Any] = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
__SCREAMING_SNAKE_CASE : Dict = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Dict = False
def _a (self ):
torch.manual_seed(0 )
A_ : Any = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowercase , use_timestep_embedding=lowercase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
A_ : Any = IPNDMScheduler()
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
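    # The tiny UNet built above (32/32/64 channels, sample_size=512) exists only to
    # keep this test fast; real Dance Diffusion checkpoints are far larger.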
def _a (self , lowercase , lowercase=0 ):
if str(lowercase ).startswith("""mps""" ):
A_ : Dict = torch.manual_seed(lowercase )
else:
A_ : Tuple = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : int = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def _a (self ):
A_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Any = self.get_dummy_components()
A_ : List[str] = DanceDiffusionPipeline(**lowercase )
A_ : List[str] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Optional[Any] = self.get_dummy_inputs(lowercase )
A_ : Union[str, Any] = pipe(**lowercase )
A_ : Tuple = output.audios
A_ : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
A_ : Dict = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
def _a (self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : Any = torch_device
A_ : int = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
A_ : Optional[int] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = torch.manual_seed(0 )
A_ : Optional[int] = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.0_96 )
A_ : Tuple = output.audios
A_ : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A_ : Optional[int] = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _a (self ):
A_ : Union[str, Any] = torch_device
A_ : List[Any] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
A_ : str = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.0_96 )
A_ : List[str] = output.audios
A_ : Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A_ : str = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 | 686 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( __UpperCAmelCase ):
    def __init__(self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , field = None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        A_ : Optional[int] = field
        A_ : Dict = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        A_ : Optional[Any] = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read(self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class _lowerCAmelCase :
    def __init__(self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
A_ : Any = dataset
A_ : List[str] = path_or_buf
A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A_ : Optional[Any] = num_proc
A_ : List[Any] = """utf-8"""
A_ : int = to_json_kwargs
    def write(self ):
        A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , None )
        orient = self.to_json_kwargs.pop("""orient""" , """records""" )
        lines = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
        index = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
        compression = self.to_json_kwargs.pop("""compression""" , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , """wb""" , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    """ was passed. Please provide a local path instead.""" )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
return written
    def _batch_json(self , args ):
        # unpack the per-batch arguments: the row offset plus the JSON serialization options
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith("""\n""" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
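    # With the default orient="records" and lines=True, one encoded batch is a block
    # of newline-delimited JSON, e.g. (hypothetical records, shown for illustration):
    #   b'{"text":"foo","label":0}\n{"text":"bar","label":1}\n'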
    def _write(self , file_obj , orient , lines , index , **to_json_kwargs , ):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_bytes )
        else:
            num_rows, batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                    written += file_obj.write(json_str )
return written | 686 | 1 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
debug_launcher(test_script.main )
def _a (self ):
debug_launcher(test_ops.main ) | 686 |
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
A_ : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
A_ : List[str] = get_model_to_test_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A_ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : List[Any] = get_model_to_tester_mapping(lowercase )
A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A_ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) | 686 | 1 |
'''simple docstring'''
def solution( ):
'''simple docstring'''
return [
a * b * (10_00 - a - b)
for a in range(1 , 9_99 )
for b in range(lowerCamelCase__ , 9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
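# For reference: the only Pythagorean triplet with a + b + c = 1000 is
# (200, 375, 425), since 200**2 + 375**2 == 425**2, so the comprehension above
# evaluates to 200 * 375 * 425 = 31875000.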
if __name__ == "__main__":
print(F"{solution() = }") | 686 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_longt5"""] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_longt5"""] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
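# The names above are only registered in _import_structure; the heavy torch and
# flax modules are imported lazily by the _LazyModule instance created at the
# bottom of this file, the first time one of these attributes is accessed.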
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
lowerCamelCase :Any = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
lowerCamelCase :Any = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
lowerCamelCase :Optional[int] = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def _a (self , lowercase , lowercase ):
A_ : str = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A_ : str = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A_ : Optional[Any] = evaluate(dataset=lowercase , predictions=lowercase )
return score | 686 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present( transformers_path ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
    if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''') | 686 | 1 |
'''simple docstring'''
def nand_gate( input_a , input_b ):
    '''simple docstring'''
    return int((input_a, input_b).count(0 ) != 0 )
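# NAND truth table for reference: (0, 0) -> 1, (0, 1) -> 1, (1, 0) -> 1, (1, 1) -> 0;
# the output is 0 only when both inputs are 1.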
def test_nand_gate( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1)) | 686 |
'''simple docstring'''
cache :dict[tuple[int, int, int], int] = {}
def _calculate( days , absent , late ):
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    # memoize the result so this (days, absent, late) state is never recomputed
    cache[key] = prizestrings
    return prizestrings
def solution( lowerCamelCase__ = 30 ):
'''simple docstring'''
return _calculate(lowerCamelCase__ , absent=0 , late=0 )
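# Sanity checks from the Project Euler 191 statement: over a 4-day period there
# are exactly 43 prize strings, so solution(4) should return 43, and the 30-day
# value computed by solution() is expected to be 1918080160.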
if __name__ == "__main__":
print(solution()) | 686 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :List[str] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'lxmert'
__SCREAMING_SNAKE_CASE : int = {}
    def __init__(self , vocab_size=30522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
A_ : Any = vocab_size
A_ : Any = hidden_size
A_ : Optional[int] = num_attention_heads
A_ : List[str] = hidden_act
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : str = max_position_embeddings
A_ : int = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Any = layer_norm_eps
A_ : Any = num_qa_labels
A_ : Optional[Any] = num_object_labels
A_ : Union[str, Any] = num_attr_labels
A_ : Optional[int] = l_layers
A_ : str = x_layers
A_ : Any = r_layers
A_ : int = visual_feat_dim
A_ : int = visual_pos_dim
A_ : Dict = visual_loss_normalizer
A_ : Dict = task_matched
A_ : Tuple = task_mask_lm
A_ : Any = task_obj_predict
A_ : str = task_qa
A_ : Union[str, Any] = visual_obj_loss
A_ : Optional[int] = visual_attr_loss
A_ : Any = visual_feat_loss
A_ : Optional[int] = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
        super().__init__(**kwargs ) | 686 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = 'linear'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial'
__SCREAMING_SNAKE_CASE : Optional[int] = 'constant'
__SCREAMING_SNAKE_CASE : str = 'constant_with_warmup'
__SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant'
def get_constant_schedule( optimizer , last_epoch = -1 ):
    '''simple docstring'''
    return LambdaLR(optimizer , lambda lowerCamelCase__ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup( optimizer , num_warmup_steps , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule( optimizer , step_rules , last_epoch = -1 ):
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(""":""" )
        steps = int(value_str )
        value = float(value )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    '''simple docstring'''
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
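# A minimal usage sketch (hypothetical model and hyper-parameters, shown for
# illustration only): pick a schedule by name and step it once per optimizer step.
#
#   import torch
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters() , lr=1E-3 )
#   lr_scheduler = get_scheduler("""linear""" , optimizer , num_warmup_steps=100 , num_training_steps=1000 )
#   for _ in range(1000 ):
#       optimizer.step()
#       lr_scheduler.step()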
def get_scheduler( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    '''simple docstring'''
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch ) | 686 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :str = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
    bort_4_8_768_1024_hparams = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 10_24,
"""hidden_size""": 7_68,
"""max_length""": 5_12,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 10_24,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1E-5,
"""token_type_vocab_size""": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
A_ : Optional[Any] = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
A_ : List[str] = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
A_ : List[str] = os.path.join(get_home_dir() , """models""" )
A_ : Optional[Any] = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ )
A_ : List[Any] = nlp.model.BERTModel(
lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , )
original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ )
A_ : Optional[int] = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCamelCase__ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
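    # For example (with * = 0): the Gluon parameter
    # `encoder.transformer_cells.0.attention_cell.proj_query.weight` maps onto
    # `bert.encoder.layer.0.attention.self.query.weight` in the Transformers model.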
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCamelCase__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Optional[Any] = hf_param.shape
A_ : List[Any] = to_torch(params[gluon_param] )
A_ : Union[str, Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
A_ : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
A_ : str = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
A_ : Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
A_ : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
A_ : List[str] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
A_ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
A_ : BertSelfAttention = layer.attention.self
A_ : List[Any] = check_and_map_params(
self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
A_ : Dict = check_and_map_params(
self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
A_ : Any = check_and_map_params(
self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
A_ : Tuple = check_and_map_params(
self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
A_ : List[str] = check_and_map_params(
self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
A_ : int = check_and_map_params(
self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
A_ : BertSelfOutput = layer.attention.output
A_ : Dict = check_and_map_params(
self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
A_ : Optional[int] = check_and_map_params(
self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
A_ : str = check_and_map_params(
self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
A_ : Tuple = check_and_map_params(
self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
A_ : BertIntermediate = layer.intermediate
A_ : List[str] = check_and_map_params(
intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
A_ : Optional[Any] = check_and_map_params(
intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
A_ : BertOutput = layer.output
A_ : List[Any] = check_and_map_params(
bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
A_ : Union[str, Any] = check_and_map_params(
bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
A_ : Optional[Any] = check_and_map_params(
bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
A_ : Tuple = check_and_map_params(
bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
A_ : List[str] = RobertaTokenizer.from_pretrained("""roberta-base""" )
A_ : List[str] = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""]
# Get gluon output
A_ : Any = mx.nd.array([input_ids] )
A_ : str = original_bort(inputs=lowerCamelCase__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase__ )
A_ : Optional[Any] = BertModel.from_pretrained(lowerCamelCase__ )
hf_bort_model.eval()
A_ : Dict = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" )
A_ : Tuple = hf_bort_model(**lowerCamelCase__ )[0]
A_ : Tuple = output_gluon[0].asnumpy()
A_ : Tuple = output_hf[0].detach().numpy()
A_ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item()
A_ : Tuple = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 )
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path) | 686 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''')
lowerCamelCase :int = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowerCamelCase :List[str] = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowerCamelCase :Union[str, Any] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowerCamelCase :Dict = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowerCamelCase :int = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowerCamelCase :str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCamelCase :List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    # every recognised weight_type ("weight", "weight_g", "bias", "running_mean",
    # the LSTM gate tensors, ...) names an attribute of the module, so the copy
    # can be expressed uniformly instead of one branch per type
    if weight_type is None:
        hf_pointer.data = value
    else:
        getattr(hf_pointer , weight_type ).data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore ( name , ignore_keys ):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
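# Illustration (hypothetical patterns): with ignore_keys = ["encoder.*"], a key
# such as "encoder.model.0.conv" matches the prefix rule and is skipped; with a
# ".*." pattern such as "block.*.conv", both the prefix and the suffix must
# occur in the name for it to be ignored.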
def recursively_load_weights ( orig_dict , hf_model , model_name ):
    '''simple docstring'''
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}' )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f'{name} was ignored' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split(""".*.""" )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split(""".""" )[-2]
                    mapped_key = mapped_key.replace("""*""" , layer_index )
                if "weight_g" in name:
                    weight_type = """weight_g"""
                elif "weight_v" in name:
                    weight_type = """weight_v"""
                elif "weight_ih_l0" in name:
                    weight_type = """weight_ih_l0"""
                elif "weight_hh_l0" in name:
                    weight_type = """weight_hh_l0"""
                elif "bias_ih_l0" in name:
                    weight_type = """bias_ih_l0"""
                elif "bias_hh_l0" in name:
                    weight_type = """bias_hh_l0"""
                elif "weight_ih_l1" in name:
                    weight_type = """weight_ih_l1"""
                elif "weight_hh_l1" in name:
                    weight_type = """weight_hh_l1"""
                elif "bias_ih_l1" in name:
                    weight_type = """bias_ih_l1"""
                elif "bias_hh_l1" in name:
                    weight_type = """bias_hh_l1"""
                elif "bias" in name:
                    weight_type = """bias"""
                elif "weight" in name:
                    weight_type = """weight"""
                elif "running_mean" in name:
                    weight_type = """running_mean"""
                elif "running_var" in name:
                    weight_type = """running_var"""
                elif "num_batches_tracked" in name:
                    weight_type = """num_batches_tracked"""
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def convert_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 3_20_00
        config.codebook_size = 20_48
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 4_80_00
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = """time_group_norm"""
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'Unknown model name: {model_name}' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["""best_state"""]
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
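# Example invocation (hypothetical paths and script name):
#   python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th --pytorch_dump_folder_path ./encodec-24khz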
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 686 | 1 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
lowerCamelCase :str = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main ( ):
    '''simple docstring'''
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main() | 686 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'beit'
    def __init__(self , vocab_size=8192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4 | 686 | 1 |
'''simple docstring'''
def catalan_numbers ( upper_limit ):
    '''simple docstring'''
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)) for j from 0 to i-1
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
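# Worked example: catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]; e.g.
# C(3) = C(0)*C(2) + C(1)*C(1) + C(2)*C(0) = 2 + 1 + 2 = 5.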
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
        N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 686 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase :Optional[int] = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def alpha_sigma_to_t ( alpha , sigma ):
    '''simple docstring'''
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule ( t ):
    '''simple docstring'''
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
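# Intuition (a sketch of the usual v-diffusion convention, not stated in this
# file): with alpha = cos(theta) and sigma = sin(theta), atan2(sigma, alpha)
# recovers theta, so t = 2 * theta / pi maps the noise level back onto [0, 1].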
class _lowerCAmelCase ( __UpperCAmelCase ):
pass
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase ):
super().__init__()
A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 )
A_ : str = deepcopy(self.diffusion )
A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase )
def download ( model_name ):
    '''simple docstring'''
    url = MODELS_MAP[model_name]["""url"""]
    os.system(f'wget {url} ./' )
    return f'./{model_name}.ckpt'
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCamelCase :str = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCamelCase :int = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCamelCase :List[Any] = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCamelCase :Optional[Any] = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return name.replace(lowerCamelCase__ , lowerCamelCase__ )
elif name.startswith(lowerCamelCase__ ):
return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value]
raise ValueError(f'Attn error with {name}' )
def a ( lowerCamelCase__ , lowerCamelCase__=13 ):
'''simple docstring'''
A_ : Union[str, Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
A_ : Dict = 0
if string.startswith("""net.3.""" ):
depth += 1
A_ : int = string[6:]
elif string.startswith("""net.""" ):
A_ : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
A_ : Dict = string[7:]
if string.startswith("""main.""" ):
A_ : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
A_ : Optional[Any] = string[:2]
A_ : Optional[Any] = string[2:]
else:
A_ : List[Any] = string[0]
A_ : Dict = string[1:]
if depth == max_depth:
A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num]
A_ : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowerCamelCase__ ) < 7:
A_ : Any = DOWN_NUM_TO_LAYER[layer_num]
A_ : Union[str, Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCamelCase__ ) > 7:
A_ : List[str] = UP_NUM_TO_LAYER[layer_num]
A_ : List[str] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
A_ : str = DEPTH_0_TO_LAYER[layer_num]
A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
A_ : Optional[int] = string_left[1:]
if "resnets" in new_layer:
A_ : Tuple = convert_resconv_naming(lowerCamelCase__ )
elif "attentions" in new_layer:
A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ )
A_ : Dict = new_string_left
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers, don't have trainable weights
continue
A_ : List[Any] = rename(lowerCamelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : int = v
return new_state_dict
def transform_conv_attns ( new_state_dict , new_k , v ):
    '''simple docstring'''
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
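# Note: a 1x1 Conv kernel of shape [out, in, 1] drops its trailing axis to
# become a Linear weight, and a fused qkv projection is split into three equal
# chunks along dim 0 for the separate query/key/value weights.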
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
A_ : int = download(lowerCamelCase__ )
A_ : Any = MODELS_MAP[model_name]["""sample_rate"""]
A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""]
A_ : Tuple = Object()
A_ : Union[str, Any] = sample_size
A_ : Tuple = sample_rate
A_ : int = 0
A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ )
A_ : Optional[Any] = diffusers_model.state_dict()
A_ : Dict = DiffusionUncond(lowerCamelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] )
A_ : Any = orig_model.diffusion_ema.eval()
A_ : Any = orig_model.state_dict()
A_ : List[str] = rename_orig_weights(lowerCamelCase__ )
A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
A_ : str = value.squeeze()
A_ : Union[str, Any] = value
diffusers_model.load_state_dict(lowerCamelCase__ )
A_ : Optional[Any] = 1_00
A_ : Union[str, Any] = 33
A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ )
A_ : List[str] = torch.manual_seed(lowerCamelCase__ )
A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ )
A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1]
A_ : List[Any] = get_crash_schedule(lowerCamelCase__ )
A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A_ : str = torch.manual_seed(33 )
A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios
A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} )
A_ : str = generated.clamp(-1 , 1 )
A_ : List[Any] = (generated - audio).abs().sum()
A_ : int = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase__ )
print("""Diff max""" , lowerCamelCase__ )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase :List[str] = parser.parse_args()
main(args) | 686 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model ):
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
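# Only parameters with requires_grad=True are counted, so frozen modules
# (e.g. a frozen encoder) do not contribute to the total.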
logger = logging.getLogger(__name__)
def get_checkpoint_callback ( output_dir , metric ):
    '''simple docstring'''
    if metric == "rouge2":
        exp = """{val_avg_rouge2:.4f}-{step_count}"""
    elif metric == "bleu":
        exp = """{val_avg_bleu:.4f}-{step_count}"""
    elif metric == "em":
        exp = """{val_avg_em:.4f}-{step_count}"""
    elif metric == "loss":
        exp = """{val_avg_loss:.4f}-{step_count}"""
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this'
            """ function.""" )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode="""max""" , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback ( metric , patience ):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f'val_{metric}' , mode="""min""" if """loss""" in metric else """max""" , patience=patience , verbose=True , )
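# Note the mode flip: loss-like metrics are minimized ("min") while score
# metrics such as rouge2/bleu/em are maximized ("max"), mirroring the
# checkpoint callback above.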
class _lowerCAmelCase ( pl.Callback ):
def _a (self , lowercase , lowercase ):
A_ : Tuple = {F'lr_group_{i}': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowercase )
@rank_zero_only
def _a (self , lowercase , lowercase , lowercase , lowercase=True ):
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
A_ : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
A_ : Tuple = Path(pl_module.hparams.output_dir )
if type_path == "test":
A_ : Any = od / """test_results.txt"""
A_ : List[str] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A_ : int = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
A_ : int = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=lowercase )
generations_file.parent.mkdir(exist_ok=lowercase )
with open(lowercase , """a+""" ) as writer:
for key in sorted(lowercase ):
if key in ["log", "progress_bar", "preds"]:
continue
A_ : List[Any] = metrics[key]
if isinstance(lowercase , torch.Tensor ):
A_ : Union[str, Any] = val.item()
A_ : List[Any] = F'{key}: {val:.6f}\n'
writer.write(lowercase )
if not save_generations:
return
if "preds" in metrics:
A_ : Dict = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(lowercase )
@rank_zero_only
def _a (self , lowercase , lowercase ):
try:
A_ : List[Any] = pl_module.model.model.num_parameters()
except AttributeError:
A_ : List[str] = pl_module.model.num_parameters()
A_ : Dict = count_trainable_parameters(lowercase )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def _a (self , lowercase , lowercase ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowercase , lowercase , """test""" )
@rank_zero_only
def _a (self , lowercase , lowercase ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 686 |
'''simple docstring'''
from math import factorial
def binomial_distribution ( successes , trials , prob ):
    '''simple docstring'''
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(trials , int ) or not isinstance(successes , int ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 0 and 1""" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
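# Worked example matching the demo below: P(2 successes in 4 trials, p=0.75)
# = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375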
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=10 , lowercase=3 , lowercase=2 , lowercase=2 , lowercase=2 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=0.9 , lowercase=None , ):
A_ : str = parent
A_ : Tuple = batch_size
A_ : List[str] = image_size
A_ : int = num_channels
A_ : Optional[int] = patch_size
A_ : List[Any] = tubelet_size
A_ : Optional[Any] = num_frames
A_ : Any = is_training
A_ : Tuple = use_labels
A_ : Union[str, Any] = hidden_size
A_ : List[str] = num_hidden_layers
A_ : str = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : Dict = mask_ratio
A_ : Union[str, Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
A_ : Union[str, Any] = (image_size // patch_size) ** 2
A_ : Any = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
A_ : Optional[int] = int(mask_ratio * self.seq_length )
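        # With the defaults above: (10 // 2) ** 2 = 25 patches per frame,
        # (2 // 2) * 25 = 25 tokens in total, and int(0.9 * 25) = 22 masked tokens.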
    def _a (self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
def _a (self ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Any = VideoMAEModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Tuple = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[Any] = VideoMAEForPreTraining(lowercase )
model.to(lowercase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A_ : List[Any] = torch.ones((self.num_masks,) )
A_ : Optional[int] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
A_ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
A_ : List[str] = model(lowercase , lowercase )
# model only returns predictions for masked patches
A_ : Optional[int] = mask.sum().item()
A_ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def _a (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Tuple = False
def _a (self ):
A_ : Union[str, Any] = VideoMAEModelTester(self )
A_ : Dict = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def _a (self , lowercase , lowercase , lowercase=False ):
A_ : Optional[int] = copy.deepcopy(lowercase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A_ : Union[str, Any] = torch.ones((self.model_tester.num_masks,) )
A_ : List[Any] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
A_ : Tuple = mask.expand(self.model_tester.batch_size , -1 ).bool()
A_ : Tuple = bool_masked_pos.to(lowercase )
if return_labels:
if model_class in [
*get_values(lowercase ),
]:
A_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def _a (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def _a (self ):
pass
def _a (self ):
A_, A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[int] = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def _a (self ):
A_, A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(lowercase )
A_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase )
@slow
def _a (self ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[str] = VideoMAEModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def _a (self ):
if not self.has_attentions:
pass
else:
A_, A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = True
for model_class in self.all_model_classes:
A_ : Tuple = self.model_tester.seq_length - self.model_tester.num_masks
A_ : str = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
A_ : Union[str, Any] = True
A_ : int = False
A_ : Dict = True
A_ : Optional[Any] = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : Tuple = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : Dict = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A_ : Tuple = True
A_ : Any = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : Any = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A_ : List[Any] = len(lowercase )
# Check attention is always last and order is fine
A_ : Optional[int] = True
A_ : List[Any] = True
A_ : Tuple = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : str = model(**self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + 1 , len(lowercase ) )
A_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _a (self ):
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : Optional[Any] = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : Dict = outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase ) , lowercase )
A_ : Tuple = self.model_tester.seq_length - self.model_tester.num_masks
A_ : Tuple = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A_, A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : int = True
check_hidden_states_output(lowercase , lowercase , lowercase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a (self ):
pass
def a ( ):
'''simple docstring'''
A_ : Dict = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
A_ : Dict = np.load(lowerCamelCase__ )
return list(lowerCamelCase__ )
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a (self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _a (self ):
A_ : List[Any] = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
lowercase )
A_ : Tuple = self.default_image_processor
A_ : Union[str, Any] = prepare_video()
A_ : Optional[int] = image_processor(lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : int = model(**lowercase )
# verify the logits
A_ : Dict = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Union[str, Any] = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
@slow
def _a (self ):
A_ : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(lowercase )
A_ : Dict = self.default_image_processor
A_ : Any = prepare_video()
A_ : Optional[Any] = image_processor(lowercase , return_tensors="""pt""" ).to(lowercase )
# add boolean mask, indicating which patches to mask
A_ : int = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
A_ : Union[str, Any] = torch.load(lowercase )
# forward pass
with torch.no_grad():
A_ : List[str] = model(**lowercase )
# verify the logits
A_ : List[str] = torch.Size([1, 1408, 1536] )
A_ : Dict = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=lowercase )
self.assertEqual(outputs.logits.shape , lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
A_ : List[str] = torch.tensor([0.51_42] , device=lowercase )
self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
A_ : Any = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=lowercase ).to(
lowercase )
with torch.no_grad():
A_ : Optional[int] = model(**lowercase )
        A_ : Optional[int] = torch.tensor([0.64_69] , device=lowercase )
self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1E-4 ) ) | 686 |
'''simple docstring'''
import re
def a ( dna ):
    '''simple docstring'''
    if len(re.findall("""[ATCG]""" , dna ) ) != len(dna ):
        raise ValueError("""Invalid Strand""" )
    return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
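# Example: "ATCG" maps to "TAGC" (A<->T, C<->G); any character outside
# A/T/C/G raises the ValueError above.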
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase , lowercase , lowercase , lowercase=0.0 , lowercase = None , lowercase = "geglu" , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = "layer_norm" , lowercase = False , ):
super().__init__()
A_ : str = only_cross_attention
A_ : Dict = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
A_ : Any = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A_ : List[str] = AdaLayerNorm(lowercase , lowercase )
elif self.use_ada_layer_norm_zero:
A_ : Dict = AdaLayerNormZero(lowercase , lowercase )
else:
A_ : Optional[int] = nn.LayerNorm(lowercase , elementwise_affine=lowercase )
A_ : int = Attention(
query_dim=lowercase , heads=lowercase , dim_head=lowercase , dropout=lowercase , bias=lowercase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=lowercase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A_ : Any = (
AdaLayerNorm(lowercase , lowercase )
if self.use_ada_layer_norm
else nn.LayerNorm(lowercase , elementwise_affine=lowercase )
)
A_ : str = Attention(
query_dim=lowercase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=lowercase , dim_head=lowercase , dropout=lowercase , bias=lowercase , upcast_attention=lowercase , ) # is self-attn if encoder_hidden_states is none
else:
A_ : List[Any] = None
A_ : List[str] = None
# 3. Feed-forward
A_ : Union[str, Any] = nn.LayerNorm(lowercase , elementwise_affine=lowercase )
A_ : List[str] = FeedForward(lowercase , dropout=lowercase , activation_fn=lowercase , final_dropout=lowercase )
# let chunk size default to None
A_ : str = None
A_ : str = 0
def _a (self , lowercase , lowercase ):
# Sets chunk feed-forward
A_ : Optional[Any] = chunk_size
A_ : Union[str, Any] = dim
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A_ : str = self.norma(lowercase , lowercase )
elif self.use_ada_layer_norm_zero:
A_, A_, A_, A_, A_ : Dict = self.norma(
lowercase , lowercase , lowercase , hidden_dtype=hidden_states.dtype )
else:
A_ : List[str] = self.norma(lowercase )
A_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A_ : Any = self.attna(
lowercase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=lowercase , **lowercase , )
if self.use_ada_layer_norm_zero:
A_ : Tuple = gate_msa.unsqueeze(1 ) * attn_output
A_ : Union[str, Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A_ : Union[str, Any] = (
self.norma(lowercase , lowercase ) if self.use_ada_layer_norm else self.norma(lowercase )
)
A_ : Union[str, Any] = self.attna(
lowercase , encoder_hidden_states=lowercase , attention_mask=lowercase , **lowercase , )
A_ : Union[str, Any] = attn_output + hidden_states
# 3. Feed-forward
A_ : List[str] = self.norma(lowercase )
if self.use_ada_layer_norm_zero:
A_ : Any = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
A_ : int = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A_ : List[Any] = torch.cat(
[self.ff(lowercase ) for hid_slice in norm_hidden_states.chunk(lowercase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A_ : Tuple = self.ff(lowercase )
if self.use_ada_layer_norm_zero:
A_ : int = gate_mlp.unsqueeze(1 ) * ff_output
A_ : Optional[Any] = ff_output + hidden_states
return hidden_states
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase , lowercase = None , lowercase = 4 , lowercase = 0.0 , lowercase = "geglu" , lowercase = False , ):
super().__init__()
A_ : Union[str, Any] = int(dim * mult )
A_ : Union[str, Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A_ : List[str] = GELU(lowercase , lowercase )
if activation_fn == "gelu-approximate":
A_ : Dict = GELU(lowercase , lowercase , approximate="""tanh""" )
elif activation_fn == "geglu":
A_ : Dict = GEGLU(lowercase , lowercase )
elif activation_fn == "geglu-approximate":
A_ : int = ApproximateGELU(lowercase , lowercase )
A_ : str = nn.ModuleList([] )
# project in
self.net.append(lowercase )
# project dropout
self.net.append(nn.Dropout(lowercase ) )
# project out
self.net.append(nn.Linear(lowercase , lowercase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(lowercase ) )
    def _a (self , hidden_states ):
        for module in self.net:
            hidden_states = module(hidden_states )
        return hidden_states
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase , lowercase , lowercase = "none" ):
super().__init__()
A_ : Tuple = nn.Linear(lowercase , lowercase )
A_ : List[Any] = approximate
def _a (self , lowercase ):
if gate.device.type != "mps":
return F.gelu(lowercase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def _a (self , lowercase ):
        hidden_states = self.proj(lowercase )
        hidden_states = self.gelu(hidden_states )
        return hidden_states
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase , lowercase ):
super().__init__()
A_ : Union[str, Any] = nn.Linear(lowercase , dim_out * 2 )
def _a (self , lowercase ):
if gate.device.type != "mps":
return F.gelu(lowercase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
    def _a (self , lowercase ):
        hidden_states , gate = self.proj(lowercase ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase , lowercase ):
super().__init__()
A_ : str = nn.Linear(lowercase , lowercase )
    def _a (self , lowercase ):
        x = self.proj(lowercase )
        return x * torch.sigmoid(1.7_02 * x )
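        # This is the sigmoid approximation of GELU, x * sigmoid(1.702 * x),
        # from Hendrycks & Gimpel (2016).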
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase , lowercase ):
super().__init__()
A_ : Tuple = nn.Embedding(lowercase , lowercase )
A_ : List[Any] = nn.SiLU()
A_ : List[str] = nn.Linear(lowercase , embedding_dim * 2 )
A_ : Any = nn.LayerNorm(lowercase , elementwise_affine=lowercase )
    def _a (self , x , timestep ):
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale , shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
        return x
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase , lowercase ):
super().__init__()
A_ : str = CombinedTimestepLabelEmbeddings(lowercase , lowercase )
A_ : Any = nn.SiLU()
A_ : Any = nn.Linear(lowercase , 6 * embedding_dim , bias=lowercase )
A_ : List[Any] = nn.LayerNorm(lowercase , elementwise_affine=lowercase , eps=1E-6 )
def _a (self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Any = self.linear(self.silu(self.emb(lowercase , lowercase , hidden_dtype=lowercase ) ) )
A_, A_, A_, A_, A_, A_ : Union[str, Any] = emb.chunk(6 , dim=1 )
A_ : str = self.norm(lowercase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , lowercase = 1E-5 ):
super().__init__()
A_ : int = num_groups
A_ : Dict = eps
if act_fn is None:
A_ : int = None
else:
A_ : Any = get_activation(lowercase )
A_ : Dict = nn.Linear(lowercase , out_dim * 2 )
def _a (self , lowercase , lowercase ):
if self.act:
A_ : List[str] = self.act(lowercase )
A_ : Tuple = self.linear(lowercase )
A_ : Any = emb[:, :, None, None]
A_, A_ : str = emb.chunk(2 , dim=1 )
A_ : List[str] = F.group_norm(lowercase , self.num_groups , eps=self.eps )
A_ : int = x * (1 + scale) + shift
return x | 686 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" ) | 686 | 1 |
'''simple docstring'''
from typing import Any
class Node :
    def __init__(self , data ):
        self.data = data
        self.next = None
    def __repr__(self ):
        return F'Node({self.data})'
class LinkedList :
    def __init__(self ):
        self.head = None
    def __iter__(self ):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self ):
        return sum(1 for _ in self )
    def __repr__(self ):
        return "->".join([str(item ) for item in self] )
    def __getitem__(self , index ):
        if not 0 <= index < len(self ):
            raise ValueError("""list index out of range.""" )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__(self , index , data ):
        if not 0 <= index < len(self ):
            raise ValueError("""list index out of range.""" )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail(self , data ):
        self.insert_nth(len(self ) , data )
    def insert_head(self , data ):
        self.insert_nth(0 , data )
    def insert_nth(self , index , data ):
        if not 0 <= index <= len(self ):
            raise IndexError("""list index out of range""" )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self ): # print every node data
        print(self )
    def delete_head(self ):
        return self.delete_nth(0 )
    def delete_tail(self ): # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth(self , index = 0 ):
        if not 0 <= index <= len(self ) - 1: # test if index is valid
            raise IndexError("""List index out of range.""" )
        delete_node = self.head # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self ):
        return self.head is None
    def reverse(self ):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def a ( ):
'''simple docstring'''
    linked_list = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCamelCase__ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCamelCase__ ) == i
linked_list.insert_nth(lowerCamelCase__ , i + 1 )
assert str(lowerCamelCase__ ) == "->".join(str(lowerCamelCase__ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCamelCase__ ) == "->".join(str(lowerCamelCase__ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCamelCase__ ) == 9
assert str(lowerCamelCase__ ) == "->".join(str(lowerCamelCase__ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
        linked_list[i] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCamelCase__ ) == "->".join(str(lowerCamelCase__ ) for i in range(-8 , 1 ) )
def a ( ):
'''simple docstring'''
    test_input = [
-9,
1_00,
Node(77_34_51_12 ),
"""dlrow olleH""",
7,
55_55,
0,
-192.55_555,
"""Hello, world!""",
77.9,
Node(10 ),
None,
None,
12.20,
]
    linked_list = LinkedList()
for i in test_input:
linked_list.insert_tail(lowerCamelCase__ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowerCamelCase__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
    result = linked_list.delete_head()
assert result == -9
assert (
str(lowerCamelCase__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
    result = linked_list.delete_tail()
assert result == 12.2
assert (
str(lowerCamelCase__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowerCamelCase__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("""Hello again, world!""" ) )
assert (
str(lowerCamelCase__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowerCamelCase__ )
assert (
str(lowerCamelCase__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowerCamelCase__ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main ( ):
'''simple docstring'''
from doctest import testmod
testmod()
    linked_list = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
print(lowerCamelCase__ )
print("""\nReading/changing Node data using indexing:""" )
print(f'Element at Position 1: {linked_list[1]}' )
    linked_list[1] = input("""Enter New Value: """ ).strip()
print("""New list:""" )
print(lowerCamelCase__ )
print(f'length of linked_list is : {len(lowerCamelCase__ )}' )
if __name__ == "__main__":
main() | 686 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'''\s+''')
def get_hash ( example ):
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def line_stats ( example ):
    '''simple docstring'''
    line_lengths = [len(line ) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
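    # Note: `uniques` is mutated in place -- the first example carrying a given
    # hash is kept and its hash removed from the set, so every later duplicate
    # returns False. E.g. with uniques = {"abc"}, the first {"hash": "abc"}
    # yields True and the second yields False.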
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def a ( lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""]
A_ : Optional[int] = example["""content"""].splitlines()
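    # Scan only the first few lines for an autogeneration marker; the `else`
    # clause below belongs to the `for` loop and fires only when the loop
    # finishes without returning, i.e. when no keyword was found.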
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ):
'''simple docstring'''
A_ : Any = ["""unit tests""", """test file""", """configuration file"""]
A_ : List[str] = example["""content"""].splitlines()
A_ : str = 0
A_ : Union[str, Any] = 0
# first test
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : List[Any] = example["""content"""].count("""\n""" )
A_ : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = ["""def """, """class """, """for """, """while """]
A_ : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a ( lowerCamelCase__ , lowerCamelCase__=4 ):
'''simple docstring'''
A_ : Tuple = example["""content"""].splitlines()
A_ : int = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""]
A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ )
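    # Characters per token: typical natural-language or code files sit around
    # 3-4 with a BPE tokenizer, so an unusually low ratio (filtered against
    # `min_token_ratio` later) hints at atypical content.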
return {"ratio": ratio}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = {}
results.update(get_hash(lowerCamelCase__ ) )
results.update(line_stats(lowerCamelCase__ ) )
results.update(alpha_stats(lowerCamelCase__ ) )
results.update(char_token_ratio(lowerCamelCase__ ) )
results.update(is_autogenerated(lowerCamelCase__ ) )
results.update(is_config_or_test(lowerCamelCase__ ) )
results.update(has_no_keywords(lowerCamelCase__ ) )
results.update(has_few_assignments(lowerCamelCase__ ) )
return results
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a ( lowerCamelCase__ ):
'''simple docstring'''
with open(lowerCamelCase__ , """rb""" ) as f_in:
with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
os.unlink(lowerCamelCase__ )
# Settings
lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments)
lowerCamelCase :Tuple = parser.parse_args()
if args.num_workers is None:
lowerCamelCase :Tuple = multiprocessing.cpu_count()
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCamelCase :List[Any] = time.time()
lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
lowerCamelCase :int = time.time()
lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
lowerCamelCase :int = set(ds.unique('''hash'''))
lowerCamelCase :List[str] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
lowerCamelCase :Dict = time.time()
lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCamelCase :List[str] = time.time()
lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
lowerCamelCase :int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
lowerCamelCase :Tuple = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
lowerCamelCase :Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json")
lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = BertConfig.from_json_file(lowerCamelCase__ )
print(f'Building PyTorch model from configuration: {config}' )
A_ : List[str] = BertForPreTraining(lowerCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase :Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 686 |
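# Example invocation (a sketch; the script filename and paths are placeholders):
#   python convert_bert_checkpoint.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin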
'''simple docstring'''
import pytest
lowerCamelCase :Optional[Any] = '''__dummy_dataset1__'''
lowerCamelCase :List[Any] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = dataset_loading_script_name
A_ : int = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase__ )
A_ : Tuple = script_dir / f'{script_name}.py'
with open(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ )
return str(lowerCamelCase__ ) | 686 | 1 |
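# Example usage (a sketch: assumes the fixture above keeps its upstream name
# `dataset_loading_script_dir` when requested by a test):
#   def test_dummy_dataset_script(dataset_loading_script_dir):
#       import datasets
#       builder = datasets.load_dataset_builder(dataset_loading_script_dir)
#       assert builder.info.features is not None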
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
def __init__(self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , ):
A_ : Tuple = size if size is not None else {"""height""": 18, """width""": 18}
A_ : Any = parent
A_ : Optional[Any] = batch_size
A_ : str = num_channels
A_ : Optional[Any] = image_size
A_ : Optional[Any] = min_resolution
A_ : Any = max_resolution
A_ : Dict = do_resize
A_ : Dict = size
A_ : List[str] = apply_ocr
def _a (self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a (self ):
A_ : Any = LayoutLMvaImageProcessingTester(self )
@property
def _a (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a (self ):
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , """do_resize""" ) )
self.assertTrue(hasattr(lowercase , """size""" ) )
self.assertTrue(hasattr(lowercase , """apply_ocr""" ) )
def _a (self ):
A_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
A_ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def _a (self ):
pass
def _a (self ):
# Initialize image_processing
A_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , lowercase )
self.assertIsInstance(encoding.boxes , lowercase )
# Test batched
A_ : Any = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _a (self ):
# Initialize image_processing
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A_ : Dict = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _a (self ):
# Initialize image_processing
A_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A_ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A_ : Dict = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _a (self ):
# with apply_OCR = True
A_ : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ : Any = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
A_ : Optional[int] = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
A_ : str = image_processing(lowercase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ : str = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
A_ : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowercase )
self.assertListEqual(encoding.boxes , lowercase )
# with apply_OCR = False
A_ : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=lowercase )
A_ : List[Any] = image_processing(lowercase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) | 686 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCamelCase :int = datasets.load_iris()
lowerCamelCase :str = np.array(data['''data'''])
lowerCamelCase :Dict = np.array(data['''target'''])
lowerCamelCase :Union[str, Any] = data['''target_names''']
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
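# e.g. euclidean_distance([0, 0], [3, 4]) == 5.0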
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
A_ : List[str] = []
for data_point in data:
A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = LEDTokenizer
__SCREAMING_SNAKE_CASE : str = LEDTokenizerFast
__SCREAMING_SNAKE_CASE : int = True
def _a (self ):
super().setUp()
A_ : str = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
A_ : Union[str, Any] = dict(zip(lowercase , range(len(lowercase ) ) ) )
A_ : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A_ : List[Any] = {"""unk_token""": """<unk>"""}
A_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase ) )
def _a (self , **lowercase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def _a (self , **lowercase ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def _a (self , lowercase ):
return "lower newer", "lower newer"
@cached_property
def _a (self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def _a (self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def _a (self ):
A_ : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
A_ : int = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : int = tokenizer(lowercase , max_length=len(lowercase ) , padding=lowercase , return_tensors="""pt""" )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
A_ : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase , lowercase )
@require_torch
def _a (self ):
A_ : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : List[str] = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , lowercase )
self.assertIn("""attention_mask""" , lowercase )
self.assertNotIn("""labels""" , lowercase )
self.assertNotIn("""decoder_attention_mask""" , lowercase )
@require_torch
def _a (self ):
A_ : List[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : List[Any] = tokenizer(text_target=lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def _a (self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : List[str] = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=lowercase , truncation=lowercase , return_tensors="""pt""" )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def _a (self ):
A_ : Optional[int] = ["""A long paragraph for summarization."""]
A_ : Tuple = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : Optional[int] = tokenizer(lowercase , return_tensors="""pt""" )
A_ : int = tokenizer(text_target=lowercase , return_tensors="""pt""" )
A_ : Dict = inputs["""input_ids"""]
A_ : Tuple = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _a (self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : Any = ["""Summary of the text.""", """Another summary."""]
A_ : Tuple = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
A_ : str = tokenizer(lowercase , padding=lowercase )
A_ : Optional[int] = [[0] * len(lowercase ) for x in encoded_output["""input_ids"""]]
A_ : List[str] = tokenizer.pad(lowercase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , lowercase )
def _a (self ):
pass
def _a (self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : List[str] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : List[str] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Any = """A, <mask> AllenNLP sentence."""
A_ : int = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
A_ : int = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
A_ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
A_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) | 686 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Tuple = [sequences]
A_ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
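        # e.g. sequences=["Who are you voting for?"] with labels=["politics", "sports"]
        # and the default template produce the premise/hypothesis pairs
        # [["Who are you voting for?", "This example is politics."],
        #  ["Who are you voting for?", "This example is sports."]]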
return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : int = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 1 |
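# Example usage (a sketch; the checkpoint name is illustrative -- any NLI model
# exposing an "entailment" label works):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I love hiking in the Alps", candidate_labels=["travel", "cooking"])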
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :str = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCamelCase :List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", F"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", F"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qpos_proj.weight", F"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kpos_proj.weight", F"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.weight", F"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", F"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", F"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kpos_proj.weight", F"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.weight", F"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", F"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", F"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", F"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_qpos_proj.bias", F"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_kpos_proj.bias", F"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.bias", F"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", F"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", F"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_kpos_proj.bias", F"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.bias", F"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", F"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ )
A_ : Dict = val
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : str = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : Any = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
A_ : str = value
else:
A_ : Any = value
return new_state_dict
def a ( lowerCamelCase__ , lowerCamelCase__=False ):
'''simple docstring'''
A_ : Any = """"""
if is_panoptic:
A_ : List[str] = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Optional[int] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
        A_ : int = in_proj_weight[:256, :]
        A_ : Optional[int] = in_proj_bias[:256]
        A_ : Any = in_proj_weight[256:512, :]
        A_ : str = in_proj_bias[256:512]
        A_ : Optional[int] = in_proj_weight[-256:, :]
        A_ : Tuple = in_proj_bias[-256:]
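        # the fused in_proj matrix stacks query, key and value along dim 0;
        # with a hidden size of 256, the three 256-row slices above are q, k, v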
def a ( ):
'''simple docstring'''
A_ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Union[str, Any] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : int = """resnet101"""
if "dc5" in model_name:
A_ : Any = True
A_ : str = """panoptic""" in model_name
if is_panoptic:
        A_ : Dict = 250
else:
A_ : Dict = 91
A_ : int = """huggingface/label-files"""
A_ : str = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
    A_ : Optional[int] = {int(k): v for k, v in idalabel.items()}
A_ : Union[str, Any] = idalabel
A_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[str] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : List[str] = ConditionalDetrImageProcessor(format=lowerCamelCase__ )
# prepare image
A_ : Optional[Any] = prepare_img()
A_ : List[str] = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" )
A_ : List[Any] = encoding["""pixel_values"""]
logger.info(f'Converting model {model_name}...' )
# load original model from torch hub
A_ : Tuple = torch.hub.load("""DeppMeng/ConditionalDETR""" , lowerCamelCase__ , pretrained=lowerCamelCase__ ).eval()
A_ : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : List[str] = """conditional_detr.""" + src
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A_ : int = rename_backbone_keys(lowerCamelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase__ , is_panoptic=lowerCamelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : Dict = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Tuple = state_dict.pop(lowerCamelCase__ )
A_ : Dict = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : int = state_dict.pop(lowerCamelCase__ )
A_ : Dict = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : str = state_dict.pop(lowerCamelCase__ )
A_ : Any = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Optional[Any] = state_dict.pop(lowerCamelCase__ )
A_ : List[str] = val
# finally, create HuggingFace model and load state dict
A_ : Tuple = ConditionalDetrForSegmentation(lowerCamelCase__ ) if is_panoptic else ConditionalDetrForObjectDetection(lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
model.push_to_hub(repo_id=lowerCamelCase__ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : Dict = conditional_detr(lowerCamelCase__ )
A_ : List[str] = model(lowerCamelCase__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
image_processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase :Union[str, Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 686 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'yolos'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = image_size
A_ : str = patch_size
A_ : int = num_channels
A_ : Optional[int] = qkv_bias
A_ : List[Any] = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : int = auxiliary_loss
# Hungarian matcher
A_ : int = class_cost
A_ : List[Any] = bbox_cost
A_ : Optional[int] = giou_cost
# Loss coefficients
A_ : Any = bbox_loss_coefficient
A_ : List[Any] = giou_loss_coefficient
A_ : str = eos_coefficient
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
return 12 | 686 | 1 |
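# note: in the upstream OnnxConfig subclass, the three properties above
# correspond to `inputs`, `atol_for_validation` (1e-4) and
# `default_onnx_opset` (12)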
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :str = logging.get_logger(__name__)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase__ , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
A_ : Optional[Any] = torch.load(hf_hub_download(repo_id=lowerCamelCase__ , filename="""pytorch_model.bin""" ) )
A_ : List[Any] = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
A_ : Dict = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
A_ : Dict = tensor_value
A_ : Tuple = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase__ , config=lowerCamelCase__ , state_dict=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
# convert tokenizer
A_ : str = AutoTokenizer.from_pretrained(lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 686 |
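# Example invocation (a sketch; the script filename and output path are
# placeholders, while the repo id is the one named in the help text above):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm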
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCamelCase :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase :Optional[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute (self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
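# Note: with concatenate_texts=False, the loop above pools substitutions, deletions,
# insertions and hits over all prediction/reference pairs before dividing, so the
# result is a corpus-level WER in which long references carry more weight than short ones.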
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
) | 686 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a (self ):
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : int = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : int = image / 2 + 0.5
if str(lowercase ).startswith("""mps""" ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a (self ):
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = CycleDiffusionPipeline(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : str = pipe(**lowercase )
A_ : str = output.images
A_ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a (self ):
A_ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase , """half""" ):
A_ : List[str] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Any = self.get_dummy_inputs(lowercase )
A_ : Tuple = pipe(**lowercase )
A_ : List[str] = output.images
A_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _a (self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Dict = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : Any = CycleDiffusionPipeline.from_pretrained(
lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : str = """A black colored car"""
A_ : Dict = """A blue colored car"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : str = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a (self ):
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : Optional[Any] = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : str = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : int = output.images
assert np.abs(image - expected_image ).max() < 2E-2 | 686 | 1 |
'''simple docstring'''
def move_tower(height , from_pole , to_pole , with_pole ):
    '''Recursively move `height` disks from `from_pole` to `to_pole`, using `with_pole` as the spare peg.'''
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk(from_pole , to_pole ):
    '''Print a single disk move.'''
    print("""moving disk from""" , from_pole , """to""" , to_pole )
def main():
    '''Read the tower height from the user and solve the puzzle.'''
    height = int(input("""Height of hanoi: """ ).strip() )
    move_tower(height , """A""" , """B""" , """C""" )
if __name__ == "__main__":
    main()
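# A tower of height n always takes 2**n - 1 moves; for example, height 3 prints the
# 7-move sequence A->B, A->C, B->C, A->B, C->A, C->B, A->B for the call above.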
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : int = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(lowercase ) | 686 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--repo_path''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the architecture.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    config_parameters_to_change = {
        '''image_size''': '''sample_size''',
        '''num_res_blocks''': '''layers_per_block''',
        '''block_channels''': '''block_out_channels''',
        '''down_blocks''': '''down_block_types''',
        '''up_blocks''': '''up_block_types''',
        '''downscale_freq_shift''': '''freq_shift''',
        '''resnet_num_groups''': '''norm_num_groups''',
        '''resnet_act_fn''': '''act_fn''',
        '''resnet_eps''': '''norm_eps''',
        '''num_head_channels''': '''attention_head_dim''',
    }
    key_parameters_to_change = {
        '''time_steps''': '''time_proj''',
        '''mid''': '''mid_block''',
        '''downsample_blocks''': '''down_blocks''',
        '''upsample_blocks''': '''up_blocks''',
    }
    subfolder = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
    with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
        text = reader.read()
    config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, '''config.json'''):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config['''down_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
        config['''up_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            # downsampler/upsampler conv weights are rebuilt by the model itself
            if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('''.''')[0] == key:
                    new_state_dict['''.'''.join([new_key] + param_key.split('''.''')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
'''simple docstring'''
from __future__ import annotations
def all_construct(target , word_bank = None ):
    '''Return every ordered list of words from `word_bank` that concatenates to `target`.'''
    word_bank = word_bank or []
    # create a table
    table_size = len(target ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
) | 686 | 1 |
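# For instance, all_construct("aa", ["a", "aa"]) returns [['aa'], ['a', 'a']] --
# every ordered way to assemble the target from bank words; filling the table costs
# O(len(target) * len(word_bank)) membership checks, plus the size of the output.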
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path ):
    '''Read a file and return its contents as one string of '0'/'1' characters.'''
    result = ""
    try:
        with open(file_path , """rb""" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print("""File not accessible""" )
        sys.exit()
def add_key_to_lexicon(lexicon , curr_string , index , last_match_id ):
    '''Replace curr_string with curr_string + "0"/"1", widening all codes at powers of two.'''
    lexicon.pop(curr_string )
    lexicon[curr_string + """0"""] = last_match_id
    if math.log2(index ).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = """0""" + lexicon[curr_key]
    lexicon[curr_string + """1"""] = bin(index )[2:]
def compress_data(data_bits ):
    '''Compress a bit string with an LZ78-style growing lexicon.'''
    lexicon = {"""0""": """0""", """1""": """1"""}
    result, curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon , curr_string , index , last_match_id )
        index += 1
        curr_string = ""
    # flush a trailing partial match, zero-padding until it hits a lexicon entry
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path , compressed ):
    '''Prepend the original file length (unary-delimited binary) to the compressed stream.'''
    file_length = os.path.getsize(source_path )
    file_length_binary = bin(file_length )[2:]
    length_length = len(file_length_binary )
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path , to_write ):
    '''Pack the bit string into bytes (padded with "1" then zeros) and write them out.'''
    byte_length = 8
    try:
        with open(file_path , """wb""" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("""10000000""" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="""big""" ) )
    except OSError:
        print("""File not accessible""" )
        sys.exit()
def compress(source_path , destination_path ):
    '''Read, compress, length-prefix and write the result.'''
    data_bits = read_file_binary(source_path )
    compressed = compress_data(data_bits )
    compressed = add_file_length(source_path , compressed )
    write_file_binary(destination_path , compressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2]) | 686 |
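# Tracing compress_data("110") by hand: the first matched "1" emits code "1"; the
# lexicon then grows past a power of two, every stored code gains a leading "0",
# and the next match "10" emits "01" -- so the output stream is "101". All codes in
# the lexicon always share the current width, which keeps the stream decodable.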
'''simple docstring'''
def is_balanced(s ):
    '''Return True if every bracket in `s` is closed in the right order.'''
    stack = []
    open_brackets = set({"""(""", """[""", """{"""} )
    closed_brackets = set({""")""", """]""", """}"""} )
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
def main():
    s = input("""Enter sequence of brackets: """ )
    if is_balanced(s ):
        print(s , """is balanced""" )
    else:
        print(s , """is not balanced""" )
if __name__ == "__main__":
main() | 686 | 1 |
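# Examples: is_balanced("([]{})") is True, while is_balanced("(]") and is_balanced("]")
# are both False -- a closing bracket must match the most recently opened one.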
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ):
'''simple docstring'''
if attention_mask is None:
A_ : Tuple = tf.cast(tf.math.not_equal(lowerCamelCase__ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Dict = OPTConfig
__SCREAMING_SNAKE_CASE : int = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = 'gelu'
def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ):
A_ : Union[str, Any] = parent
A_ : List[Any] = batch_size
A_ : Optional[int] = seq_length
A_ : List[str] = is_training
A_ : List[str] = use_labels
A_ : Optional[int] = vocab_size
A_ : Dict = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : Dict = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : Optional[int] = max_position_embeddings
A_ : Any = eos_token_id
A_ : Tuple = pad_token_id
A_ : List[str] = bos_token_id
A_ : str = embed_dim
A_ : Optional[int] = word_embed_proj_dim
A_ : Any = False
def _a (self ):
A_ : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A_ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A_ : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
A_ : Dict = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , )
A_ : Tuple = prepare_opt_inputs_dict(lowercase , lowercase )
return config, inputs_dict
def _a (self , lowercase , lowercase ):
A_ : int = TFOPTModel(config=lowercase )
A_ : str = inputs_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids[:1, :]
A_ : Union[str, Any] = inputs_dict["""attention_mask"""][:1, :]
A_ : List[Any] = 1
# first forward pass
A_ : Tuple = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
A_, A_ : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A_ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A_ : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A_ : Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A_ : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A_ : Dict = model(lowercase , attention_mask=lowercase )[0]
A_ : str = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A_ : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
A_ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
@require_tf
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE : List[str] = (TFOPTForCausalLM,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE : Optional[int] = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : int = 10
def _a (self ):
A_ : Optional[Any] = TFOPTModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase )
def _a (self ):
self.config_tester.run_common_tests()
def _a (self ):
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def _a (self ):
A_, A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase , lowercase ):
if hasattr(lowercase , """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
A_ : int = model_class(config=lowercase )
A_ : List[str] = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
A_ : Optional[int] = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase )
A_ : List[Any] = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
A_ : int = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
A_ : Optional[int] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase )
# check that weights remain the same after resizing
A_ : List[str] = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
A_ : List[Any] = False
self.assertTrue(lowercase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase )
A_ : Any = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
A_ : Union[str, Any] = False
self.assertTrue(lowercase )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return tf.constant(lowerCamelCase__ , dtype=tf.intaa )
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = 99
def _a (self ):
A_ : Any = tf.ones((4, 1) , dtype=tf.intaa ) * 2
A_ : Any = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
A_ : str = input_ids.shape[0]
A_ : List[str] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a (self ):
A_ : List[str] = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
A_ : Tuple = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
A_ : Union[str, Any] = tf.not_equal(lowercase , model.config.pad_token_id )
with tf.GradientTape():
A_ : Union[str, Any] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state
A_ : int = (1, 11, 512)
self.assertEqual(output.shape , lowercase )
A_ : List[Any] = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) )
A_ : Dict = tf.function(lowercase , jit_compile=lowercase )
A_ : List[str] = xla_generate(lowercase , lowercase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) )
@require_tf
@slow
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
super().setUp()
A_ : Dict = """facebook/opt-350m"""
def _a (self ):
A_ : Tuple = TFOPTForCausalLM.from_pretrained(self.path_model )
A_ : Tuple = GPTaTokenizer.from_pretrained(self.path_model )
A_ : Optional[int] = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
A_ : Optional[Any] = tokenizer(lowercase , return_tensors="""tf""" , padding=lowercase , add_special_tokens=lowercase )
A_ : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
A_ : Optional[Any] = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
A_ : Any = tf.function(lowercase , jit_compile=lowercase )
A_ : str = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
@require_tf
@slow
class _lowerCAmelCase ( unittest.TestCase ):
@property
def _a (self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _a (self ):
A_ : Tuple = """facebook/opt-125m"""
A_ : Dict = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
A_ : str = []
A_ : int = GPTaTokenizer.from_pretrained(lowercase )
A_ : Dict = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
A_ : int = tokenizer(lowercase , return_tensors="""tf""" ).input_ids
A_ : List[str] = model.generate(lowercase , max_length=10 )
A_ : Tuple = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : str = """facebook/opt-350m"""
A_ : Tuple = GPTaTokenizer.from_pretrained(lowercase )
A_ : List[Any] = TFOPTForCausalLM.from_pretrained(lowercase )
A_ : Union[str, Any] = """left"""
# use different length sentences to test batching
A_ : int = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A_ : Tuple = tokenizer(lowercase , return_tensors="""tf""" , padding=lowercase )
A_ : Optional[Any] = inputs["""input_ids"""]
A_ : int = model.generate(input_ids=lowercase , attention_mask=inputs["""attention_mask"""] )
A_ : Tuple = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
A_ : Dict = model.generate(input_ids=lowercase )
A_ : int = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
A_ : List[Any] = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
A_ : Optional[int] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
A_ : List[str] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
A_ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
A_ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
A_ : Tuple = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
def _a (self ):
A_ : str = """facebook/opt-350m"""
A_ : int = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
A_ : Union[str, Any] = []
A_ : List[str] = GPTaTokenizer.from_pretrained(lowercase )
A_ : Union[str, Any] = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
A_ : str = tokenizer(lowercase , return_tensors="""tf""" ).input_ids
A_ : List[str] = model.generate(lowercase , max_length=10 )
A_ : int = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase ) | 686 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader ( AbstractDatasetReader ):
    def __init__ (self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , field = None , num_proc = None , **kwargs ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read (self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter :
    def __init__ (self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs
    def write (self ):
        self.to_json_kwargs.pop("""path_or_buf""" , None )
        orient = self.to_json_kwargs.pop("""orient""" , """records""" )
        lines = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
        index = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
        compression = self.to_json_kwargs.pop("""compression""" , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , """wb""" , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    """ was passed. Please provide a local path instead.""" )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _batch_json (self , args ):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith("""\n""" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write (self , file_obj , orient , lines , index , **to_json_kwargs ):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_str )
        else:
            num_rows, batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                    written += file_obj.write(json_str )
        return written
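# Note: the writer above slices the Arrow table into `batch_size` chunks and, when
# num_proc > 1, encodes the chunks to JSON in a multiprocessing.Pool; pool.imap
# yields results in submission order, so the output file is still written sequentially.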
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''Tokenize a single line, padded/truncated to `max_length`.'''
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ):
    '''Remove columns that are populated exclusively by `pad_token_id`.'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class SeqaSeqDataset ( Dataset ):
    def __init__ (self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + """.source""" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + """.target""" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}'
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self ):
        return len(self.src_lens )
    def __getitem__(self , index ):
        index = index + 1 # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
        assert source_line, F'empty source line for index {index}'
        assert tgt_line, F'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , TaTokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , """right""" )
        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens (data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn (self , batch ):
        input_ids = torch.stack([x["""input_ids"""] for x in batch] )
        masks = torch.stack([x["""attention_mask"""] for x in batch] )
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids ):
    '''Flatten a list of lists into a single list.'''
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info(folder_path ):
    '''Save git information to folder_path/git_log.json.'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json""" ) )
def save_json(content , path , indent=4 , **json_dump_kwargs ):
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    '''Return the current repo id, commit sha, branch and hostname.'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos
def lmap(f , x ):
    '''list(map(f, x))'''
    return list(map(f , x ) )
def pickle_save(obj , path ):
    '''pickle.dump(obj, path)'''
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def normalize_answer(s ):
    '''Lower text and remove punctuation, articles and extra whitespace.'''
    def remove_articles(text ):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction , ground_truth ):
    '''Token-overlap F1 between a predicted and a reference answer.'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction , ground_truth ):
    '''Exact string match after normalization.'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match(output_lns , reference_lns ):
    '''Average exact-match score over aligned prediction/reference lists.'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix ):
    return model_prefix.startswith("""rag""" )
def set_extra_model_params(extra_params , hparams , config ):
    '''Move extra params from hparams onto config, mapping T5-style names where needed.'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
A_ : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
A_ : List[str] = get_model_to_test_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A_ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : List[Any] = get_model_to_tester_mapping(lowercase )
A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A_ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) | 686 | 1 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def a ( lowerCamelCase__ , lowerCamelCase__=10_00 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
A_ : Optional[int] = n - 1
A_ : Optional[int] = 0
while d % 2 == 0:
d /= 2
exp += 1
    # n - 1 = d * (2**exp)
A_ : Optional[Any] = 0
while count < prec:
A_ : List[str] = random.randint(2 , n - 1 )
A_ : str = bin_exp_mod(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if b != 1:
A_ : Optional[Any] = True
for _ in range(lowerCamelCase__ ):
if b == n - 1:
A_ : Optional[int] = False
break
A_ : Any = b * b
b %= n
if flag:
return False
count += 1
return True
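# Hedged cross-check for the Miller-Rabin routine above: a naive trial-division
# primality test (hypothetical helper, not part of the original module).
def _is_prime_naive(m):
    if m < 2:
        return False
    return all(m % k for k in range(2, int(m**0.5) + 1))

# 561 is a Carmichael number; trial division correctly rejects it.
assert [_is_prime_naive(x) for x in (2, 3, 97, 561)] == [True, True, True, False]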
if __name__ == "__main__":
lowerCamelCase :Optional[int] = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 686 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase :Any = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 1 |
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ ):
'''simple docstring'''
return len(set(lowerCamelCase__ ) ) == len(lowerCamelCase__ )
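# Self-contained sketch of the uniqueness check above (hypothetical name);
# note it relies on the elements being hashable.
def _all_unique(items):
    return len(set(items)) == len(items)

assert _all_unique([1, 2, 3]) and not _all_unique([1, 2, 2])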
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCamelCase :Any = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def a ( lowerCamelCase__ ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
lowerCamelCase :List[Any] = parser.parse_args()
if args.check_lib:
lowerCamelCase :Union[str, Any] = importlib.import_module('''transformers''')
lowerCamelCase :Union[str, Any] = Path(transformers_module.__file__).parent
else:
lowerCamelCase :List[str] = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''') | 686 | 1 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
class _lowerCAmelCase :
def __init__(self , lowercase = None , lowercase = None , lowercase=None , lowercase=None ):
if not conversation_id:
A_ : Tuple = uuid.uuida()
if past_user_inputs is None:
A_ : Optional[Any] = []
if generated_responses is None:
A_ : Optional[Any] = []
A_ : uuid.UUID = conversation_id
A_ : List[str] = past_user_inputs
A_ : List[str] = generated_responses
A_ : Optional[str] = text
def __eq__(self , lowercase ):
if not isinstance(lowercase , lowercase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _a (self , lowercase , lowercase = False ):
if self.new_user_input:
if overwrite:
logger.warning(
                F'User input added while unprocessed input already existed: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
A_ : Optional[Any] = text
else:
logger.warning(
                F'User input added while unprocessed input already existed: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
A_ : Dict = text
def _a (self ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
A_ : str = None
def _a (self , lowercase ):
self.generated_responses.append(lowercase )
def _a (self ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__(self ):
A_ : Union[str, Any] = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
A_ : Union[str, Any] = """user""" if is_user else """bot"""
output += F'{name} >> {text} \n'
return output
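# Self-contained sketch of the conversation bookkeeping above: a hypothetical
# minimal re-implementation of the add-input / mark-processed cycle, not the
# actual class (whose obfuscated definition is left untouched).
class _MiniConversation:
    def __init__(self):
        self.past_user_inputs = []
        self.generated_responses = []
        self.new_user_input = None

    def add_user_input(self, text):
        self.new_user_input = text

    def mark_processed(self):
        self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

_conv = _MiniConversation()
_conv.add_user_input("hello")
_conv.mark_processed()
_conv.generated_responses.append("hi there")
assert _conv.past_user_inputs == ["hello"] and _conv.new_user_input is None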
@add_end_docstrings(
__UpperCAmelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , *lowercase , **lowercase ):
super().__init__(*lowercase , **lowercase )
if self.tokenizer.pad_token_id is None:
A_ : str = self.tokenizer.eos_token
def _a (self , lowercase=None , lowercase=None , lowercase=None , **lowercase ):
A_ : Tuple = {}
A_ : Optional[int] = {}
A_ : Tuple = {}
if min_length_for_response is not None:
A_ : Optional[int] = min_length_for_response
if minimum_tokens is not None:
A_ : Dict = minimum_tokens
if "max_length" in generate_kwargs:
A_ : Optional[Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
A_ : Dict = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase )
return preprocess_params, forward_params, postprocess_params
def __call__(self , lowercase , lowercase=0 , **lowercase ):
A_ : Optional[int] = super().__call__(lowercase , num_workers=lowercase , **lowercase )
if isinstance(lowercase , lowercase ) and len(lowercase ) == 1:
return outputs[0]
return outputs
def _a (self , lowercase , lowercase=32 ):
if not isinstance(lowercase , lowercase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
A_ : int = self.tokenizer._build_conversation_input_ids(lowercase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
A_ : List[Any] = self._legacy_parse_and_tokenize(lowercase )
if self.framework == "pt":
A_ : str = torch.LongTensor([input_ids] )
elif self.framework == "tf":
A_ : Optional[int] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _a (self , lowercase , lowercase=10 , **lowercase ):
A_ : Dict = generate_kwargs.get("""max_length""" , self.model.config.max_length )
A_ : Any = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
A_ : Any = max_length - minimum_tokens
A_ : Optional[int] = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
A_ : int = model_inputs["""attention_mask"""][:, -trim:]
A_ : Union[str, Any] = model_inputs.pop("""conversation""" )
A_ : Optional[int] = max_length
A_ : List[Any] = self.model.generate(**lowercase , **lowercase )
if self.model.config.is_encoder_decoder:
A_ : Dict = 1
else:
A_ : Tuple = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _a (self , lowercase , lowercase=True ):
A_ : Tuple = model_outputs["""output_ids"""]
A_ : Optional[Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase , )
A_ : Dict = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(lowercase )
return conversation
def _a (self , lowercase ):
A_ : str = self.tokenizer.eos_token_id
A_ : Optional[int] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowercase , add_special_tokens=lowercase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowercase , add_special_tokens=lowercase ) )
if len(lowercase ) > self.tokenizer.model_max_length:
A_ : Optional[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids | 686 |
'''simple docstring'''
lowerCamelCase :dict[tuple[int, int, int], int] = {}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
A_ : Tuple = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
A_ : int = _calculate(days - 1 , lowerCamelCase__ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
A_ : Union[str, Any] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
A_ : Optional[int] = _calculate(days - 1 , lowerCamelCase__ , 0 )
A_ : Optional[Any] = state_late + state_absent + state_ontime
A_ : Dict = prizestrings
return prizestrings
def a ( lowerCamelCase__ = 30 ):
'''simple docstring'''
return _calculate(lowerCamelCase__ , absent=0 , late=0 )
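# Self-contained sketch of the same DP with explicit names (assumed equivalent
# to the obfuscated recursion above); Project Euler 191 states there are 43
# valid 4-day strings, which this reproduces.
from functools import lru_cache

@lru_cache(maxsize=None)
def _prize_strings(days, absent, late):
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        _prize_strings(days - 1, absent, late + 1)  # late today
        + _prize_strings(days - 1, absent + 1, 0)   # absent today
        + _prize_strings(days - 1, absent, 0)       # on time today
    )

assert _prize_strings(4, 0, 0) == 43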
if __name__ == "__main__":
print(solution()) | 686 | 1 |
'''simple docstring'''
import math
def a ( lowerCamelCase__ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( lowerCamelCase__ = 0.1 ):
'''simple docstring'''
A_ : Any = 3
A_ : Union[str, Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(lowerCamelCase__ )
j += 2
return j
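# Hedged aside: the loop above visits the three new non-square diagonal
# corners of the spiral ring with side j + 2; for a ring of side length j
# the corners are j*j - k*(j - 1) for k in 0..3 (hypothetical helper below).
def _spiral_corners(side):
    return [side * side - k * (side - 1) for k in range(4)]

assert _spiral_corners(3) == [9, 7, 5, 3]
assert _spiral_corners(5) == [25, 21, 17, 13]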
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = 'linear'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial'
__SCREAMING_SNAKE_CASE : Optional[int] = 'constant'
__SCREAMING_SNAKE_CASE : str = 'constant_with_warmup'
__SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant'
def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) )
return 1.0
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
A_ : Optional[Any] = {}
A_ : Optional[Any] = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A_, A_ : Union[str, Any] = rule_str.split(""":""" )
A_ : Union[str, Any] = int(lowerCamelCase__ )
A_ : List[Any] = float(lowerCamelCase__ )
A_ : Union[str, Any] = value
A_ : Optional[int] = float(rule_list[-1] )
def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ):
def rule_func(lowerCamelCase__ ) -> float:
A_ : str = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCamelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A_ : str = create_rules_function(lowerCamelCase__ , lowerCamelCase__ )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
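# Self-contained sketch of the "step:multiplier" rule string parsed above,
# e.g. "10:0.1,20:0.01,0.001" (rule format inferred from the parsing code;
# names below are made up).
def _parse_step_rules(spec):
    *pairs, last = spec.split(",")
    rules = {int(s): float(v) for s, v in (p.split(":") for p in pairs)}

    def multiplier(step):
        for boundary in sorted(rules):
            if step < boundary:
                return rules[boundary]
        return float(last)

    return multiplier

_mult = _parse_step_rules("10:0.1,20:0.01,0.001")
assert (_mult(5), _mult(15), _mult(25)) == (0.1, 0.01, 0.001)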
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
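# Numeric sketch of the warmup-then-linear-decay multiplier defined above
# (pure function, no optimizer required; the sample numbers are made up).
def _linear_lambda(step, warmup=2, total=10):
    if step < warmup:
        return step / max(1, warmup)
    return max(0.0, (total - step) / max(1, total - warmup))

assert _linear_lambda(0) == 0.0 and _linear_lambda(2) == 1.0
assert abs(_linear_lambda(6) - 0.5) < 1e-9 and _linear_lambda(10) == 0.0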
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ):
'''simple docstring'''
A_ : Optional[Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A_ : str = lr_init - lr_end
A_ : Tuple = num_training_steps - num_warmup_steps
A_ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
A_ : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase :List[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ):
'''simple docstring'''
A_ : Optional[Any] = SchedulerType(lowerCamelCase__ )
A_ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) | 686 | 1 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCamelCase :Dict = logging.get_logger(__name__)
lowerCamelCase :str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase :List[Any] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowerCamelCase :List[Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : int = LEDTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = ['input_ids', 'attention_mask']
def __init__(self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
A_ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
A_ : Dict = getattr(lowercase , pre_tok_state.pop("""type""" ) )
A_ : List[Any] = add_prefix_space
A_ : Dict = pre_tok_class(**lowercase )
A_ : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : Any = """post_processor"""
A_ : Dict = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
A_ : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : Optional[int] = tuple(state["""sep"""] )
if "cls" in state:
A_ : List[str] = tuple(state["""cls"""] )
A_ : List[Any] = False
if state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
A_ : Optional[Any] = add_prefix_space
A_ : Optional[Any] = True
if state.get("""trim_offsets""" , lowercase ) != trim_offsets:
A_ : List[Any] = trim_offsets
A_ : Any = True
if changes_to_apply:
A_ : List[Any] = getattr(lowercase , state.pop("""type""" ) )
A_ : List[Any] = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _a (self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a (self , lowercase ):
A_ : int = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
A_ : List[Any] = value
def _a (self , *lowercase , **lowercase ):
A_ : List[str] = kwargs.get("""is_split_into_words""" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*lowercase , **lowercase )
def _a (self , lowercase , lowercase = None ):
A_ : int = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def _a (self , lowercase , lowercase=None ):
A_ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a (self , lowercase , lowercase = None ):
A_ : Dict = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a (self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ):
A_ : Optional[int] = super()._pad(
encoded_inputs=lowercase , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
# Load from model defaults
if return_attention_mask is None:
A_ : List[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A_ : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
A_ : Union[str, Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowercase )
if needs_to_be_padded:
A_ : Dict = len(lowercase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A_ : Dict = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
A_ : Union[str, Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs | 686 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''')
lowerCamelCase :int = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowerCamelCase :List[str] = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowerCamelCase :Union[str, Any] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowerCamelCase :Dict = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowerCamelCase :int = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowerCamelCase :str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCamelCase :List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCamelCase :Tuple = []
lowerCamelCase :Dict = []
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Dict = value
elif weight_type == "bias":
A_ : Dict = value
elif weight_type == "running_mean":
A_ : Optional[Any] = value
elif weight_type == "running_var":
A_ : int = value
elif weight_type == "num_batches_tracked":
A_ : Optional[Any] = value
elif weight_type == "weight_ih_l0":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l0":
A_ : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l0":
A_ : Tuple = value
elif weight_type == "weight_ih_l1":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
A_ : Dict = value
elif weight_type == "bias_ih_l1":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l1":
A_ : Tuple = value
else:
A_ : Any = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A_, A_ : List[str] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
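# Hedged mini-check of the three wildcard patterns handled above: a trailing
# ".*" matches a prefix, ".*." matches prefix plus suffix, and anything else
# is a plain substring test (helper and sample names are made up).
def _matches(name, key):
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name

assert _matches("encoder.layers.0.conv", "encoder.*")
assert _matches("quantizer.vq.layers.3._codebook.embed", "quantizer.vq.layers.*._codebook.embed")
assert not _matches("decoder.layers.0.conv", "encoder.*")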
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = []
if model_name == "encodec_24khz" or "encodec_32khz":
A_ : List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A_ : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__ , lowerCamelCase__ ):
logger.info(f'{name} was ignored' )
continue
A_ : str = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A_, A_ : List[Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
A_ : Union[str, Any] = True
if "*" in mapped_key:
A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : str = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Dict = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : Any = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Optional[int] = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : List[Any] = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : Union[str, Any] = """running_mean"""
elif "running_var" in name:
A_ : Optional[int] = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
'''simple docstring'''
if config_path is not None:
A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Dict = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
A_ : Tuple = 64
A_ : Tuple = 3_20_00
A_ : List[Any] = 20_48
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[int] = False
elif model_name == "encodec_48khz":
A_ : Dict = [8, 5, 4, 2]
A_ : Tuple = [3.0, 6.0, 12.0, 24.0]
A_ : List[Any] = 4_80_00
A_ : Dict = 2
A_ : Dict = False
A_ : Dict = """time_group_norm"""
A_ : Optional[Any] = True
A_ : str = 1.0
A_ : Any = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
A_ : Dict = EncodecModel(lowerCamelCase__ )
A_ : Any = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase__ )
A_ : int = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 686 | 1 |
'''simple docstring'''
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
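# Self-contained sketch of the neighbour-colour test above (hypothetical
# names): a colour is valid iff no already-coloured neighbour holds it.
def _valid_coloring(neighbours, colours, colour):
    return not any(n == 1 and colours[i] == colour for i, n in enumerate(neighbours))

assert _valid_coloring([0, 1, 1], [-1, 0, -1], 1)
assert not _valid_coloring([0, 1, 1], [-1, 0, -1], 0)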
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
A_ : Tuple = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
A_ : Tuple = -1
return False
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return [] | 686 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'beit'
def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = vocab_size
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = initializer_range
A_ : str = layer_norm_eps
A_ : Any = image_size
A_ : int = patch_size
A_ : List[str] = num_channels
A_ : Any = use_mask_token
A_ : Dict = use_absolute_position_embeddings
A_ : List[Any] = use_relative_position_bias
A_ : Tuple = use_shared_relative_position_bias
A_ : Optional[int] = layer_scale_init_value
A_ : Tuple = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : Optional[int] = use_auxiliary_head
A_ : Union[str, Any] = auxiliary_loss_weight
A_ : Tuple = auxiliary_channels
A_ : List[Any] = auxiliary_num_convs
A_ : Dict = auxiliary_concat_input
A_ : Optional[Any] = semantic_loss_ignore_index
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4 | 686 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def a ( lowerCamelCase__ , lowerCamelCase__=False ):
'''simple docstring'''
try:
A_ : Union[str, Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A_ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
A_ : Any = strtobool(lowerCamelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
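# Self-contained sketch of the env-var flag parsing above (the variable name
# "_DEMO_FLAG" is invented for the example; `distutils.util.strtobool`
# mirrors the import this module already uses).
import os as _os
from distutils.util import strtobool as _strtobool

def _flag_from_env(key, default=False):
    value = _os.environ.get(key)
    return default if value is None else bool(_strtobool(value))

_os.environ["_DEMO_FLAG"] = "yes"
assert _flag_from_env("_DEMO_FLAG") is True
assert _flag_from_env("_DEMO_MISSING") is False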
lowerCamelCase :str = parse_flag_from_env('''RUN_SLOW''', default=False)
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(lowerCamelCase__ )
def a ( lowerCamelCase__=None , lowerCamelCase__=None ):
'''simple docstring'''
if test_case is None:
return partial(lowerCamelCase__ , version=lowerCamelCase__ )
return unittest.skipUnless(is_torch_version(""">=""" , lowerCamelCase__ ) , f'test requires torch version >= {version}' )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(lowerCamelCase__ )
lowerCamelCase :str = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def a ( lowerCamelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(lowerCamelCase__ )
class _lowerCAmelCase ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = True
@classmethod
def _a (cls ):
A_ : Tuple = tempfile.mkdtemp()
@classmethod
def _a (cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _a (self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase )
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self , lowercase ):
A_ : Optional[int] = mocks if isinstance(lowercase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = AcceleratorState()
A_ : str = tensor[None].clone().to(state.device )
A_ : Optional[int] = gather(lowerCamelCase__ ).cpu()
A_ : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCamelCase__ ):
return False
return True
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase ):
A_ : str = returncode
A_ : Union[str, Any] = stdout
A_ : Dict = stderr
async def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
while True:
A_ : Optional[int] = await stream.readline()
if line:
callback(lowerCamelCase__ )
else:
break
async def a ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(lowerCamelCase__ ) )
A_ : List[Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that no
    # data will be seen until the process is done, and if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A_ : Union[str, Any] = []
A_ : List[Any] = []
def tee(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="" ):
A_ : List[Any] = line.decode("""utf-8""" ).rstrip()
sink.append(lowerCamelCase__ )
if not quiet:
print(lowerCamelCase__ , lowerCamelCase__ , file=lowerCamelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=lowerCamelCase__ , )
return _RunOutput(await p.wait() , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=1_80 , lowerCamelCase__=False , lowerCamelCase__=True ):
'''simple docstring'''
A_ : Tuple = asyncio.get_event_loop()
A_ : Tuple = loop.run_until_complete(
_stream_subprocess(lowerCamelCase__ , env=lowerCamelCase__ , stdin=lowerCamelCase__ , timeout=lowerCamelCase__ , quiet=lowerCamelCase__ , echo=lowerCamelCase__ ) )
A_ : Optional[Any] = """ """.join(lowerCamelCase__ )
if result.returncode > 0:
A_ : int = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class _lowerCAmelCase ( __UpperCAmelCase ):
pass
def a ( lowerCamelCase__ , lowerCamelCase__=False ):
'''simple docstring'''
try:
A_ : Dict = subprocess.check_output(lowerCamelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCamelCase__ , """decode""" ):
A_ : str = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(lowerCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e | 686 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase :Optional[int] = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return torch.atana(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2
A_ : List[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ )
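# Hedged numeric check of the alpha/sigma/t relation used above: with
# alpha = cos(t*pi/2) and sigma = sin(t*pi/2), t is recovered as
# atan2(sigma, alpha) / pi * 2 (plain-math sketch, assumed equivalent to
# the torch-based helper).
import math

def _alpha_sigma_to_t(alpha, sigma):
    return math.atan2(sigma, alpha) / math.pi * 2

_t = 0.3
assert abs(_alpha_sigma_to_t(math.cos(_t * math.pi / 2), math.sin(_t * math.pi / 2)) - _t) < 1e-9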
class _lowerCAmelCase ( __UpperCAmelCase ):
pass
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase ):
super().__init__()
A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 )
A_ : str = deepcopy(self.diffusion )
A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = MODELS_MAP[model_name]["""url"""]
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCamelCase :str = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCamelCase :int = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCamelCase :List[Any] = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCamelCase :Optional[Any] = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return name.replace(lowerCamelCase__ , lowerCamelCase__ )
elif name.startswith(lowerCamelCase__ ):
return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value]
raise ValueError(f'Attn error with {name}' )
def a ( lowerCamelCase__ , lowerCamelCase__=13 ):
'''simple docstring'''
A_ : Union[str, Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
A_ : Dict = 0
if string.startswith("""net.3.""" ):
depth += 1
A_ : int = string[6:]
elif string.startswith("""net.""" ):
A_ : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
A_ : Dict = string[7:]
if string.startswith("""main.""" ):
A_ : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
A_ : Optional[Any] = string[:2]
A_ : Optional[Any] = string[2:]
else:
A_ : List[Any] = string[0]
A_ : Dict = string[1:]
if depth == max_depth:
A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num]
A_ : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowerCamelCase__ ) < 7:
A_ : Any = DOWN_NUM_TO_LAYER[layer_num]
A_ : Union[str, Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCamelCase__ ) > 7:
A_ : List[str] = UP_NUM_TO_LAYER[layer_num]
A_ : List[str] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
A_ : str = DEPTH_0_TO_LAYER[layer_num]
A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
A_ : Optional[int] = string_left[1:]
if "resnets" in new_layer:
A_ : Tuple = convert_resconv_naming(lowerCamelCase__ )
elif "attentions" in new_layer:
A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ )
A_ : Dict = new_string_left
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
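# Worked example of the renaming above: "timestep_embed.weight" becomes
# "time_proj.weight"; for the UNet keys, every leading "net.3." / nested "main.7."
# segment bumps `depth`, and the remaining layer number is routed to a
# down_blocks.{depth}, mid_block, or up_blocks.{max_depth - depth - 1} prefix via the
# *_NUM_TO_LAYER tables.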
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
            # up- and downsample layers don't have trainable weights
continue
A_ : List[Any] = rename(lowerCamelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : int = v
return new_state_dict
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if len(lowerCamelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
A_ : Optional[Any] = v[:, :, 0]
else:
# bias
A_ : Union[str, Any] = v
else:
# qkv matrices
A_ : Optional[int] = v.shape[0]
A_ : str = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
A_ : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
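# Sketch of the qkv split above (illustration only): a fused Conv1d weight of shape
# (3 * C, C, 1) is cut into three (C, C) Linear weights for query, key and value, e.g.
#
#     fused = torch.randn(6, 2, 1)  # C = 2
#     q, k, v = (fused[i * 2 : (i + 1) * 2, :, 0] for i in range(3))  # each (2, 2)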
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
A_ : int = download(lowerCamelCase__ )
A_ : Any = MODELS_MAP[model_name]["""sample_rate"""]
A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""]
A_ : Tuple = Object()
A_ : Union[str, Any] = sample_size
A_ : Tuple = sample_rate
A_ : int = 0
A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ )
A_ : Optional[Any] = diffusers_model.state_dict()
A_ : Dict = DiffusionUncond(lowerCamelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] )
A_ : Any = orig_model.diffusion_ema.eval()
A_ : Any = orig_model.state_dict()
A_ : List[str] = rename_orig_weights(lowerCamelCase__ )
A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
A_ : str = value.squeeze()
A_ : Union[str, Any] = value
diffusers_model.load_state_dict(lowerCamelCase__ )
A_ : Optional[Any] = 1_00
A_ : Union[str, Any] = 33
A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ )
A_ : List[str] = torch.manual_seed(lowerCamelCase__ )
A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ )
A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1]
A_ : List[Any] = get_crash_schedule(lowerCamelCase__ )
A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A_ : str = torch.manual_seed(33 )
A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios
A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} )
A_ : str = generated.clamp(-1 , 1 )
A_ : List[Any] = (generated - audio).abs().sum()
A_ : int = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase__ )
print("""Diff max""" , lowerCamelCase__ )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase :List[str] = parser.parse_args()
main(args) | 686 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@dataclass
class _lowerCAmelCase :
def __init__(self , lowercase=False , lowercase=False , lowercase=6.0 , lowercase=None , lowercase=False , lowercase=False , lowercase=None , lowercase="fp4" , lowercase=False , **lowercase , ):
A_ : Optional[int] = load_in_abit
A_ : int = load_in_abit
A_ : List[str] = llm_inta_threshold
A_ : Any = llm_inta_skip_modules
A_ : str = llm_inta_enable_fpaa_cpu_offload
A_ : str = llm_inta_has_fpaa_weight
A_ : Optional[int] = bnb_abit_quant_type
A_ : int = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
A_ : Optional[int] = torch.floataa
elif isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = getattr(lowercase , lowercase )
elif isinstance(lowercase , torch.dtype ):
A_ : int = bnb_abit_compute_dtype
else:
raise ValueError("""bnb_4bit_compute_dtype must be a string or a torch.dtype""" )
self.post_init()
def _a (self ):
if not isinstance(self.llm_inta_threshold , lowercase ):
raise ValueError("""llm_int8_threshold must be a float""" )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , lowercase ):
raise ValueError("""llm_int8_skip_modules must be a list of strings""" )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , lowercase ):
raise ValueError("""llm_int8_enable_fp32_cpu_offload must be a boolean""" )
if not isinstance(self.llm_inta_has_fpaa_weight , lowercase ):
raise ValueError("""llm_int8_has_fp16_weight must be a boolean""" )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError("""bnb_4bit_compute_dtype must be torch.dtype""" )
if not isinstance(self.bnb_abit_quant_type , lowercase ):
raise ValueError("""bnb_4bit_quant_type must be a string""" )
if not isinstance(self.bnb_abit_use_double_quant , lowercase ):
raise ValueError("""bnb_4bit_use_double_quant must be a boolean""" )
if self.load_in_abit and not version.parse(importlib.metadata.version("""bitsandbytes""" ) ) >= version.parse(
"""0.39.0""" ):
raise ValueError(
"""4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version""" )
def _a (self ):
return self.load_in_abit or self.load_in_abit
def _a (self ):
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
A_ : Dict = cls(**lowercase )
A_ : Tuple = []
for key, value in kwargs.items():
if hasattr(lowercase , lowercase ):
setattr(lowercase , lowercase , lowercase )
to_remove.append(lowercase )
for key in to_remove:
kwargs.pop(lowercase , lowercase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _a (self , lowercase ):
with open(lowercase , """w""" , encoding="""utf-8""" ) as writer:
A_ : int = self.to_dict()
A_ : int = json.dumps(lowercase , indent=2 , sort_keys=lowercase ) + """\n"""
writer.write(lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : List[Any] = str(output["""bnb_4bit_compute_dtype"""] ).split(""".""" )[1]
return output
def __repr__(self ):
return F'{self.__class__.__name__} {self.to_json_string()}'
def _a (self , lowercase = True ):
if use_diff is True:
A_ : List[str] = self.to_diff_dict()
else:
A_ : Optional[int] = self.to_dict()
return json.dumps(lowercase , indent=2 , sort_keys=lowercase ) + "\n"
def _a (self ):
A_ : List[Any] = self.to_dict()
# get the default config dict
A_ : int = BitsAndBytesConfig().to_dict()
A_ : str = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
A_ : Optional[int] = value
return serializable_config_dict | 686 |
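# Usage sketch (hypothetical names, since identifiers are placeholders in this snippet):
# in the real library this corresponds to constructing
# BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype="bfloat16") and calling
# its JSON serializer with use_diff=True, so that only the fields differing from the
# defaults (as computed by the diff helper above) end up in the output.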
'''simple docstring'''
from math import factorial
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if successes > trials:
        raise ValueError("""successes must be lower than or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
        raise ValueError("""prob has to be in the range of 0 to 1""" )
A_ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
A_ : Union[str, Any] = float(factorial(lowerCamelCase__ ) )
coefficient /= factorial(lowerCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
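# Worked example for the call below: P(2 successes in 4 trials, p = 0.75)
#   = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375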
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''')
lowerCamelCase :int = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowerCamelCase :List[str] = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowerCamelCase :Union[str, Any] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowerCamelCase :Dict = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowerCamelCase :int = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowerCamelCase :str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCamelCase :List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCamelCase :Tuple = []
lowerCamelCase :Dict = []
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
            f'Shape of hf {key + ("." + weight_type if weight_type is not None else "")} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Dict = value
elif weight_type == "bias":
A_ : Dict = value
elif weight_type == "running_mean":
A_ : Optional[Any] = value
elif weight_type == "running_var":
A_ : int = value
elif weight_type == "num_batches_tracked":
A_ : Optional[Any] = value
elif weight_type == "weight_ih_l0":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l0":
A_ : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l0":
A_ : Tuple = value
elif weight_type == "weight_ih_l1":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
A_ : Dict = value
elif weight_type == "bias_ih_l1":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l1":
A_ : Tuple = value
else:
A_ : Any = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A_, A_ : List[str] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
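# Wildcard semantics of the ignore keys above: "encoder.model.*" matches any name that
# starts with "encoder.model.", while "a.*.b" matches any name containing both "a" and
# "b"; plain keys match by substring.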
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
A_ : List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A_ : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__ , lowerCamelCase__ ):
logger.info(f'{name} was ignored' )
continue
A_ : str = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A_, A_ : List[Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
A_ : Union[str, Any] = True
if "*" in mapped_key:
A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : str = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Dict = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : Any = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Optional[int] = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : List[Any] = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : Union[str, Any] = """running_mean"""
elif "running_var" in name:
A_ : Optional[int] = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
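# Traced example of the loop above: the original key
# "quantizer.vq.layers.0._codebook.embed" matches the wildcard mapping
# "quantizer.vq.layers.*._codebook.embed"; the layer index "0" is read back out of the
# prefix and substituted for the "*" in the target, yielding
# "quantizer.layers.0.codebook.embed".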
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
'''simple docstring'''
if config_path is not None:
A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Dict = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
A_ : Tuple = 64
A_ : Tuple = 3_20_00
A_ : List[Any] = 20_48
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[int] = False
elif model_name == "encodec_48khz":
A_ : Dict = [8, 5, 4, 2]
A_ : Tuple = [3.0, 6.0, 12.0, 24.0]
A_ : List[Any] = 4_80_00
A_ : Dict = 2
A_ : Dict = False
A_ : Dict = """time_group_norm"""
A_ : Optional[Any] = True
A_ : str = 1.0
A_ : Any = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
A_ : Dict = EncodecModel(lowerCamelCase__ )
A_ : Any = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase__ )
A_ : int = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 686 |
'''simple docstring'''
import re
def a ( lowerCamelCase__ ):
'''simple docstring'''
if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
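# Example: "ATCG" translates to its complement "TAGC"; any character outside ATCG makes
# the length check above fail and raises ValueError("Invalid Strand").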
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self , lowercase , lowercase ):
A_ : Dict = jnp.ones((batch_size, length) ) / length
return scores
def _a (self ):
A_ : List[Any] = None
A_ : Optional[int] = 20
A_ : Tuple = self._get_uniform_logits(batch_size=2 , length=lowercase )
# tweak scores to not be uniform anymore
        A_ : List[Any] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, second batch
        A_ : str = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, second batch
# compute softmax
A_ : List[str] = jax.nn.softmax(lowercase , axis=-1 )
A_ : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=1.3 )
A_ : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(lowercase , scores.copy() , cur_len=lowercase ) , axis=-1 )
A_ : Dict = jax.nn.softmax(temp_dist_warper_smoother(lowercase , scores.copy() , cur_len=lowercase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _a (self ):
A_ : Optional[Any] = None
A_ : str = 10
A_ : Union[str, Any] = 2
# create ramp distribution
A_ : List[Any] = np.broadcast_to(np.arange(lowercase )[None, :] , (batch_size, vocab_size) ).copy()
A_ : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
A_ : List[str] = FlaxTopKLogitsWarper(3 )
A_ : Dict = top_k_warp(lowercase , lowercase , cur_len=lowercase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
A_ : Union[str, Any] = 5
A_ : Optional[int] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
A_ : Tuple = np.broadcast_to(np.arange(lowercase )[None, :] , (batch_size, length) ).copy()
A_ : Optional[int] = top_k_warp_safety_check(lowercase , lowercase , cur_len=lowercase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _a (self ):
A_ : Any = None
A_ : Any = 10
A_ : Optional[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
A_ : int = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
A_ : Optional[int] = FlaxTopPLogitsWarper(0.8 )
A_ : Optional[int] = np.exp(top_p_warp(lowercase , lowercase , cur_len=lowercase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
A_ : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# check edge cases with negative and extreme logits
A_ : Any = np.broadcast_to(np.arange(lowercase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
A_ : Optional[int] = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
A_ : int = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
A_ : str = top_p_warp(lowercase , lowercase , cur_len=lowercase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
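    # Arithmetic behind the first expected tensor in the test above: for the first batch
    # the sorted probs are 0.5, 0.3, 0.1, 0.1 and the smallest set reaching top_p = 0.8
    # is {0.5, 0.3}; for the second batch 0.3 + 0.3 + 0.25 = 0.85 >= 0.8, so three
    # tokens survive.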
def _a (self ):
A_ : str = 20
A_ : int = 4
A_ : Optional[Any] = 0
A_ : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase )
# check that min length is applied at length 5
A_ : Union[str, Any] = ids_tensor((batch_size, 20) , vocab_size=20 )
A_ : Any = 5
A_ : Union[str, Any] = self._get_uniform_logits(lowercase , lowercase )
A_ : Tuple = min_dist_processor(lowercase , lowercase , cur_len=lowercase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
A_ : Tuple = self._get_uniform_logits(lowercase , lowercase )
A_ : Union[str, Any] = 15
A_ : Any = min_dist_processor(lowercase , lowercase , cur_len=lowercase )
self.assertFalse(jnp.isinf(lowercase ).any() )
def _a (self ):
A_ : str = 20
A_ : Union[str, Any] = 4
A_ : List[Any] = 0
A_ : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase )
# check that all scores are -inf except the bos_token_id score
A_ : Tuple = ids_tensor((batch_size, 1) , vocab_size=20 )
A_ : Union[str, Any] = 1
A_ : Any = self._get_uniform_logits(lowercase , lowercase )
A_ : Any = logits_processor(lowercase , lowercase , cur_len=lowercase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
A_ : Dict = 3
A_ : Any = self._get_uniform_logits(lowercase , lowercase )
A_ : int = logits_processor(lowercase , lowercase , cur_len=lowercase )
self.assertFalse(jnp.isinf(lowercase ).any() )
def _a (self ):
A_ : Dict = 20
A_ : Optional[Any] = 4
A_ : List[Any] = 0
A_ : Tuple = 5
A_ : str = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase , eos_token_id=lowercase )
# check that all scores are -inf except the eos_token_id when max_length is reached
A_ : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
A_ : Any = 4
A_ : Optional[int] = self._get_uniform_logits(lowercase , lowercase )
A_ : List[Any] = logits_processor(lowercase , lowercase , cur_len=lowercase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
A_ : Optional[int] = 3
A_ : List[Any] = self._get_uniform_logits(lowercase , lowercase )
A_ : Tuple = logits_processor(lowercase , lowercase , cur_len=lowercase )
self.assertFalse(jnp.isinf(lowercase ).any() )
def _a (self ):
A_ : Tuple = 4
A_ : List[Any] = 10
A_ : Union[str, Any] = 15
A_ : Optional[int] = 2
A_ : int = 1
A_ : Any = 15
# dummy input_ids and scores
A_ : Optional[int] = ids_tensor((batch_size, sequence_length) , lowercase )
A_ : Optional[Any] = input_ids.copy()
A_ : Tuple = self._get_uniform_logits(lowercase , lowercase )
A_ : Dict = scores.copy()
# instantiate all dist processors
A_ : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : List[str] = FlaxTopKLogitsWarper(3 )
A_ : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
A_ : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase )
A_ : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase )
A_ : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase , eos_token_id=lowercase )
A_ : Optional[Any] = 10
# no processor list
A_ : int = temp_dist_warp(lowercase , lowercase , cur_len=lowercase )
A_ : Dict = top_k_warp(lowercase , lowercase , cur_len=lowercase )
A_ : Tuple = top_p_warp(lowercase , lowercase , cur_len=lowercase )
A_ : int = min_dist_proc(lowercase , lowercase , cur_len=lowercase )
A_ : Optional[Any] = bos_dist_proc(lowercase , lowercase , cur_len=lowercase )
A_ : Optional[int] = eos_dist_proc(lowercase , lowercase , cur_len=lowercase )
# with processor list
A_ : str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
A_ : int = processor(lowercase , lowercase , cur_len=lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(lowercase , lowercase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _a (self ):
A_ : Optional[int] = 4
A_ : Any = 10
A_ : int = 15
A_ : Tuple = 2
A_ : List[Any] = 1
A_ : Optional[int] = 15
# dummy input_ids and scores
A_ : Dict = ids_tensor((batch_size, sequence_length) , lowercase )
A_ : Dict = input_ids.copy()
A_ : Any = self._get_uniform_logits(lowercase , lowercase )
A_ : Union[str, Any] = scores.copy()
# instantiate all dist processors
A_ : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Dict = FlaxTopKLogitsWarper(3 )
A_ : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
A_ : Any = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase )
A_ : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase )
A_ : str = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase , eos_token_id=lowercase )
A_ : Any = 10
# no processor list
def run_no_processor_list(lowercase , lowercase , lowercase ):
A_ : List[str] = temp_dist_warp(lowercase , lowercase , cur_len=lowercase )
A_ : List[str] = top_k_warp(lowercase , lowercase , cur_len=lowercase )
A_ : Optional[int] = top_p_warp(lowercase , lowercase , cur_len=lowercase )
A_ : Any = min_dist_proc(lowercase , lowercase , cur_len=lowercase )
A_ : Any = bos_dist_proc(lowercase , lowercase , cur_len=lowercase )
A_ : List[str] = eos_dist_proc(lowercase , lowercase , cur_len=lowercase )
return scores
# with processor list
def run_processor_list(lowercase , lowercase , lowercase ):
A_ : str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
A_ : Optional[int] = processor(lowercase , lowercase , cur_len=lowercase )
return scores
A_ : Optional[Any] = jax.jit(lowercase )
A_ : Tuple = jax.jit(lowercase )
A_ : Optional[Any] = jitted_run_no_processor_list(lowercase , lowercase , lowercase )
A_ : Any = jitted_run_processor_list(lowercase , lowercase , lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(lowercase , lowercase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) | 686 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(lowerCamelCase__ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(lowerCamelCase__ ):
http_head("""https://huggingface.co""" ) | 686 | 1 |
'''simple docstring'''
import os
def a ( ):
'''simple docstring'''
with open(os.path.dirname(lowerCamelCase__ ) + """/grid.txt""" ) as f:
A_ : Any = [] # noqa: E741
for _ in range(20 ):
            l.append([int(x) for x in f.readline().split()] )
A_ : Tuple = 0
# right
for i in range(20 ):
for j in range(17 ):
A_ : Optional[int] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
A_ : Optional[Any] = temp
# down
for i in range(17 ):
for j in range(20 ):
A_ : str = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
A_ : int = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
A_ : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
A_ : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
A_ : Tuple = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
A_ : Optional[Any] = temp
return maximum
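# The ranges stop at 17 (= 20 - 3) so every 4-term product stays inside the 20x20 grid;
# the second diagonal pass walks the row down while the column steps back, so its
# column index starts at 3.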
if __name__ == "__main__":
print(solution()) | 686 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCamelCase :Any = re.compile(R'''\s+''')
def a ( lowerCamelCase__ ):
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def a ( lowerCamelCase__ ):
'''simple docstring'''
    A_ : Any = [len(line ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
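# Note: the membership check above *removes* each hash from `uniques` on first sight,
# so when it is used as a dataset filter further down, only the first example carrying
# a given hash is kept.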
def a ( lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""]
A_ : Optional[int] = example["""content"""].splitlines()
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ):
'''simple docstring'''
A_ : Any = ["""unit tests""", """test file""", """configuration file"""]
A_ : List[str] = example["""content"""].splitlines()
A_ : str = 0
A_ : Union[str, Any] = 0
# first test
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : List[Any] = example["""content"""].count("""\n""" )
A_ : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = ["""def """, """class """, """for """, """while """]
A_ : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a ( lowerCamelCase__ , lowerCamelCase__=4 ):
'''simple docstring'''
A_ : Tuple = example["""content"""].splitlines()
A_ : int = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""]
A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ )
return {"ratio": ratio}
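# The ratio above is characters per token; examples whose ratio falls below
# args.min_token_ratio are dropped by the filter further down.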
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = {}
results.update(get_hash(lowerCamelCase__ ) )
results.update(line_stats(lowerCamelCase__ ) )
results.update(alpha_stats(lowerCamelCase__ ) )
results.update(char_token_ratio(lowerCamelCase__ ) )
results.update(is_autogenerated(lowerCamelCase__ ) )
results.update(is_config_or_test(lowerCamelCase__ ) )
results.update(has_no_keywords(lowerCamelCase__ ) )
results.update(has_few_assignments(lowerCamelCase__ ) )
return results
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a ( lowerCamelCase__ ):
'''simple docstring'''
with open(lowerCamelCase__ , """rb""" ) as f_in:
with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
os.unlink(lowerCamelCase__ )
# Settings
lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments)
lowerCamelCase :Tuple = parser.parse_args()
if args.num_workers is None:
lowerCamelCase :Tuple = multiprocessing.cpu_count()
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCamelCase :List[Any] = time.time()
lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
lowerCamelCase :int = time.time()
lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
lowerCamelCase :int = set(ds.unique('''hash'''))
lowerCamelCase :List[str] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
lowerCamelCase :Dict = time.time()
lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCamelCase :List[str] = time.time()
lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
lowerCamelCase :int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
lowerCamelCase :Tuple = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
lowerCamelCase :Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json")
lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = LxmertTokenizer
__SCREAMING_SNAKE_CASE : int = LxmertTokenizerFast
__SCREAMING_SNAKE_CASE : Optional[int] = True
__SCREAMING_SNAKE_CASE : str = True
def _a (self ):
super().setUp()
A_ : Optional[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
A_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _a (self , lowercase ):
A_ : Union[str, Any] = """UNwant\u00E9d,running"""
A_ : Union[str, Any] = """unwanted, running"""
return input_text, output_text
def _a (self ):
A_ : str = self.tokenizer_class(self.vocab_file )
A_ : Tuple = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [7, 4, 5, 10, 8, 9] )
def _a (self ):
if not self.test_rust_tokenizer:
return
A_ : Dict = self.get_tokenizer()
A_ : int = self.get_rust_tokenizer()
A_ : Tuple = """I was born in 92000, and this is falsé."""
A_ : Tuple = tokenizer.tokenize(lowercase )
A_ : str = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
A_ : str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = tokenizer.encode(lowercase )
A_ : List[str] = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase ) | 686 |
'''simple docstring'''
import pytest
lowerCamelCase :Optional[Any] = '''__dummy_dataset1__'''
lowerCamelCase :List[Any] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = dataset_loading_script_name
A_ : int = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase__ )
A_ : Tuple = script_dir / f'{script_name}.py'
with open(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ )
return str(lowerCamelCase__ ) | 686 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCamelCase :Optional[int] = False
class _lowerCAmelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Any = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : List[Any] = pipe(
image=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
A_ : str = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A_ : Optional[Any] = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 686 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCamelCase :int = datasets.load_iris()
lowerCamelCase :str = np.array(data['''data'''])
lowerCamelCase :Dict = np.array(data['''target'''])
lowerCamelCase :Union[str, Any] = data['''target_names''']
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
A_ : List[str] = []
for data_point in data:
A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
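# Flow of the classifier above: distances from the query point to every training point
# are computed, the k smallest are kept after sorting, and the majority label among
# them wins (ties fall back to Counter.most_common ordering).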
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : str = BloomTokenizerFast
__SCREAMING_SNAKE_CASE : List[Any] = BloomTokenizerFast
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = 'tokenizer_file'
__SCREAMING_SNAKE_CASE : List[Any] = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def _a (self ):
super().setUp()
A_ : Optional[Any] = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def _a (self ):
A_ : Tuple = self.get_rust_tokenizer()
A_ : Any = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
A_ : Optional[Any] = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
A_ : Optional[int] = tokenizer.batch_encode_plus(lowercase )["""input_ids"""]
self.assertListEqual(lowercase , lowercase )
A_ : Optional[Any] = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self , lowercase=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
A_ : Union[str, Any] = """This is a simple input"""
A_ : List[str] = ["""This is a simple input 1""", """This is a simple input 2"""]
A_ : Union[str, Any] = ("""This is a simple input""", """This is a pair""")
A_ : List[str] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(lowercase , max_length=lowercase )
tokenizer_r.encode_plus(lowercase , max_length=lowercase )
tokenizer_r.batch_encode_plus(lowercase , max_length=lowercase )
tokenizer_r.encode(lowercase , max_length=lowercase )
tokenizer_r.batch_encode_plus(lowercase , max_length=lowercase )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
A_ : Dict = None # Hotfixing padding = None
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding="""max_length""" )
# Simple input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding="""max_length""" )
# Simple input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding="""max_length""" , )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding="""max_length""" )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding="""max_length""" )
# Pair input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding="""max_length""" , )
def _a (self ):
A_ : List[Any] = self.get_rust_tokenizer()
A_ : Dict = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=lowercase )
        A_ : List[Any] = next(iter(lowercase ) )["""premise"""] # pick one sample
A_ : Any = list(sample_data.values() )
A_ : Optional[Any] = list(map(tokenizer.encode , lowercase ) )
A_ : str = [tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) for x in output_tokens]
self.assertListEqual(lowercase , lowercase )
def _a (self ):
        # This test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence length constraint. The parent class's test would fail since it relies on
        # the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 ) | 686 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Tuple = [sequences]
A_ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : int = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A_ : str = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
                # The tokenizer may complain that we are asking to truncate
                # to a length that the input never even reaches.
                # In that case we don't want to truncate at all,
                # and there is no better way to detect this than
                # catching the exception and inspecting its message.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
            kwargs["""multi_label"""] = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 1 |
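# A self-contained illustration of the postprocessing above, with made-up NLI
# logits for two candidate labels (columns: contradiction, neutral, entailment).
# multi_label scores each label independently against its contradiction logit;
# the single-label branch softmaxes the entailment logits across all labels.
import numpy as np

logits = np.array([[0.1, 0.2, 2.5],    # hypothesis built from label "sports"
                   [1.8, 0.3, 0.2]])   # hypothesis built from label "politics"
entailment_id, contradiction_id = 2, 0

pair = logits[:, [contradiction_id, entailment_id]]
multi_label_scores = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
print(multi_label_scores[:, 1])  # independent probabilities, need not sum to 1

entail = logits[:, entailment_id]
single_label_scores = np.exp(entail) / np.exp(entail).sum()
print(single_label_scores)  # competes across labels, sums to 1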
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(
lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
A_ : Optional[int] = field
A_ : Dict = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
A_ : Optional[Any] = Json(
cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , )
def _a (self ):
# Build iterable dataset
if self.streaming:
A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ : int = None
A_ : Union[str, Any] = None
A_ : int = None
A_ : List[str] = None
self.builder.download_and_prepare(
download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
A_ : str = self.builder.as_dataset(
split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
A_ : Any = dataset
A_ : List[str] = path_or_buf
A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A_ : Optional[Any] = num_proc
A_ : List[Any] = """utf-8"""
A_ : int = to_json_kwargs
def _a (self ):
A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase )
A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer:
A_ : Tuple = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
""" was passed. Please provide a local path instead.""" )
A_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
return written
def _a (self , lowercase ):
A_, A_, A_, A_, A_ : List[str] = args
A_ : List[str] = query_table(
table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
A_ : Any = batch.to_pandas().to_json(
path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
A_ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase )
else:
A_, A_ : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(lowercase )
return written | 686 |
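# The writer above serializes the Arrow table in slices of `batch_size` rows so
# a large dataset never has to become one giant pandas DataFrame. A minimal
# sketch of the same offset-based batching on a plain DataFrame (the real class
# slices the Arrow table through query_table instead):
import pandas as pd

df = pd.DataFrame({"a": range(10), "b": list("abcdefghij")})
batch_size = 4
chunks = []
for offset in range(0, len(df), batch_size):
    json_str = df.iloc[offset : offset + batch_size].to_json(orient="records", lines=True)
    if not json_str.endswith("\n"):
        json_str += "\n"
    chunks.append(json_str)
print("".join(chunks))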
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'yolos'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = image_size
A_ : str = patch_size
A_ : int = num_channels
A_ : Optional[int] = qkv_bias
A_ : List[Any] = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : int = auxiliary_loss
# Hungarian matcher
A_ : int = class_cost
A_ : List[Any] = bbox_cost
A_ : Optional[int] = giou_cost
# Loss coefficients
A_ : Any = bbox_loss_coefficient
A_ : List[Any] = giou_loss_coefficient
A_ : str = eos_coefficient
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
return 12 | 686 | 1 |
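# With the defaults above, the encoder sequence YOLOS attends over is the ViT
# patch sequence plus a [CLS] token and the learned detection tokens. A quick
# back-of-the-envelope check (assumed token layout, for illustration only):
image_height, image_width = 512, 864
patch_size = 16
num_detection_tokens = 100

num_patches = (image_height // patch_size) * (image_width // patch_size)
sequence_length = 1 + num_patches + num_detection_tokens  # [CLS] + patches + detection tokens
print(num_patches, sequence_length)  # 1728 patches -> sequence length of 1829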
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase :str = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Dict = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 700 |
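# The module above uses the lazy-import pattern: names are declared as strings
# in `_import_structure` and only resolved on first attribute access, so
# importing the package does not eagerly pull in torch or flax. A stripped-down
# sketch of the idea via module-level __getattr__ (PEP 562); the submodule name
# here is hypothetical and this is not the actual _LazyModule implementation.
import importlib

_lazy_structure = {"heavy_submodule": ["HeavyModel"]}  # hypothetical layout


def __getattr__(name):
    for submodule_name, exported_names in _lazy_structure.items():
        if name in exported_names:
            submodule = importlib.import_module("." + submodule_name, __name__)
            return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")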
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCamelCase :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level.
The WER is a valuable tool for comparing different systems, as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, so further work is required to identify the main source(s) of error and to focus research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. A related empirical observation, often described as a power law, relates perplexity to word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase :Optional[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _a (self , lowercase=None , lowercase=None , lowercase=False ):
if concatenate_texts:
return compute_measures(lowercase , lowercase )["wer"]
else:
A_ : List[Any] = 0
A_ : Optional[int] = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : Any = compute_measures(lowercase , lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 686 | 0 |
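# Working the docstring example by hand: WER counts word-level edit operations
# against the reference. A minimal word-level Levenshtein distance reproduces
# the 0.5 score; this is a sketch, not the jiwer implementation (which also
# reports the substitution/deletion/insertion breakdown separately).
def word_edit_distance(reference: str, hypothesis: str) -> int:
    ref, hyp = reference.split(), hypothesis.split()
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i  # delete all remaining reference words
    for j in range(len(hyp) + 1):
        dp[0][j] = j  # insert all remaining hypothesis words
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[-1][-1]


references = ["this is the reference", "there is another one"]
predictions = ["this is the prediction", "there is an other sample"]
errors = sum(word_edit_distance(r, p) for r, p in zip(references, predictions))
total = sum(len(r.split()) for r in references)
print(errors / total)  # (1 + 3) / (4 + 4) = 0.5, matching the docstring example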
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowerCamelCase :Optional[Any] = datasets.utils.logging.get_logger(__name__)
lowerCamelCase :Optional[Any] = ['''names''', '''prefix''']
lowerCamelCase :Optional[int] = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
lowerCamelCase :str = ['''encoding_errors''', '''on_bad_lines''']
lowerCamelCase :str = ['''date_format''']
@dataclass
class _lowerCAmelCase ( datasets.BuilderConfig ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ','
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : List[Any] = 'infer'
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : Tuple = None
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : Tuple = True
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : int = '.'
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : List[str] = '"'
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : Tuple = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : Optional[Any] = 10_000
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : str = 'strict'
__SCREAMING_SNAKE_CASE : List[str] = 'error'
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
def _a (self ):
if self.delimiter is not None:
A_ : int = self.delimiter
if self.column_names is not None:
A_ : Optional[int] = self.column_names
@property
def _a (self ):
A_ : List[Any] = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _lowerCAmelCase ( datasets.ArrowBasedBuilder ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CsvConfig
def _a (self ):
return datasets.DatasetInfo(features=self.config.features )
def _a (self , lowercase ):
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
A_ : Tuple = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            A_ : List[str] = data_files
            if isinstance(files , str ):
                A_ : Union[str, Any] = [files]
            A_ : str = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        A_ : List[str] = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                A_ : Optional[Any] = [files]
            A_ : int = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
return splits
def _a (self , lowercase ):
if self.config.features is not None:
A_ : int = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                A_ : Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                A_ : Union[str, Any] = table_cast(pa_table , schema )
return pa_table
def _a (self , lowercase ):
A_ : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
A_ : Optional[Any] = (
{
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            A_ : List[Any] = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    A_ : Any = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
raise | 701 |
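# The builder above streams every CSV through pandas' chunked reader and turns
# each chunk into an Arrow table, so files larger than memory can be ingested.
# A minimal sketch of that read loop on an in-memory buffer (illustrative data
# and chunk size):
import io

import pandas as pd
import pyarrow as pa

csv_buffer = io.StringIO("a,b\n1,x\n2,y\n3,z\n")
csv_file_reader = pd.read_csv(csv_buffer, iterator=True, chunksize=2)
for batch_idx, df in enumerate(csv_file_reader):
    pa_table = pa.Table.from_pandas(df)
    print(batch_idx, pa_table.num_rows)  # chunk 0 has 2 rows, chunk 1 has 1 row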
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a (self ):
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : int = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : int = image / 2 + 0.5
if str(lowercase ).startswith("""mps""" ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a (self ):
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = CycleDiffusionPipeline(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : str = pipe(**lowercase )
A_ : str = output.images
A_ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a (self ):
A_ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase , """half""" ):
A_ : List[str] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Any = self.get_dummy_inputs(lowercase )
A_ : Tuple = pipe(**lowercase )
A_ : List[str] = output.images
A_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _a (self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Dict = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : Any = CycleDiffusionPipeline.from_pretrained(
lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : str = """A black colored car"""
A_ : Dict = """A blue colored car"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : str = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a (self ):
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : Optional[Any] = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : str = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : int = output.images
assert np.abs(image - expected_image ).max() < 2E-2 | 686 | 0 |
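# The assertions above follow the usual diffusers test pattern: rather than
# storing a full reference image, the test pins down a 3x3 corner slice of one
# channel and checks it against hard-coded values, which still catches most
# numerical drift. A toy illustration of the same check on synthetic data:
import numpy as np

images = np.linspace(0.0, 1.0, 32 * 32 * 3).reshape(1, 32, 32, 3)
image_slice = images[0, -3:, -3:, -1]       # 3x3 corner of the last channel
expected_slice = image_slice.copy()         # in real tests: hard-coded numbers
assert image_slice.shape == (3, 3)
assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < 1e-2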
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Dict = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class _lowerCAmelCase ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : str = """encodec"""
def __init__(self , lowercase=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase=24000 , lowercase=1 , lowercase=False , lowercase=None , lowercase=None , lowercase=128 , lowercase=32 , lowercase=1 , lowercase=[8, 5, 4, 2] , lowercase="weight_norm" , lowercase=7 , lowercase=7 , lowercase=3 , lowercase=2 , lowercase=True , lowercase="reflect" , lowercase=2 , lowercase=2 , lowercase=1.0 , lowercase=1024 , lowercase=None , lowercase=True , **lowercase , ):
A_ : Optional[Any] = target_bandwidths
A_ : Union[str, Any] = sampling_rate
A_ : List[str] = audio_channels
A_ : int = normalize
A_ : Tuple = chunk_length_s
A_ : Dict = overlap
A_ : Tuple = hidden_size
A_ : Optional[Any] = num_filters
A_ : str = num_residual_layers
A_ : List[str] = upsampling_ratios
A_ : str = norm_type
A_ : Optional[Any] = kernel_size
A_ : str = last_kernel_size
A_ : List[str] = residual_kernel_size
A_ : int = dilation_growth_rate
A_ : Tuple = use_causal_conv
A_ : Optional[int] = pad_mode
A_ : Dict = compress
A_ : Union[str, Any] = num_lstm_layers
A_ : int = trim_right_ratio
A_ : Tuple = codebook_size
A_ : Tuple = codebook_dim if codebook_dim is not None else hidden_size
A_ : Optional[int] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}' )
        super().__init__(**lowercase )
@property
def _a (self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _a (self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _a (self ):
A_ : List[str] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def _a (self ):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 702 |
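# Plugging the 24 kHz defaults above into the derived properties: the hop
# length is the product of the upsampling ratios, the frame rate follows from
# the sampling rate, and the largest target bandwidth fixes the number of
# residual quantizer codebooks. Worked numbers (mirroring the property code):
import math

sampling_rate = 24_000
upsampling_ratios = [8, 5, 4, 2]
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

hop_length = math.prod(upsampling_ratios)           # 320 samples per frame
frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second
num_quantizers = int(1000 * target_bandwidths[-1] // (frame_rate * 10))
print(hop_length, frame_rate, num_quantizers)       # 320 75 32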
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : int = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(lowercase ) | 686 | 0 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Dict = logging.get_logger(__name__)
class _lowerCAmelCase ( Enum ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = "linear"
__SCREAMING_SNAKE_CASE : Tuple = "cosine"
__SCREAMING_SNAKE_CASE : Tuple = "cosine_with_restarts"
__SCREAMING_SNAKE_CASE : int = "polynomial"
__SCREAMING_SNAKE_CASE : Optional[int] = "constant"
__SCREAMING_SNAKE_CASE : Optional[Any] = "constant_with_warmup"
__SCREAMING_SNAKE_CASE : List[Any] = "piecewise_constant"
def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ) -> Optional[Any]:
'''simple docstring'''
return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ) -> List[str]:
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) )
return 1.0
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ) -> List[str]:
'''simple docstring'''
A_ : List[Any] = {}
A_ : Any = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A_, A_ : List[Any] = rule_str.split(""":""" )
A_ : str = int(lowerCamelCase__ )
A_ : Optional[Any] = float(lowerCamelCase__ )
A_ : List[str] = value
A_ : List[str] = float(rule_list[-1] )
def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ):
def rule_func(lowerCamelCase__ ) -> float:
A_ : Tuple = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCamelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A_ : Tuple = create_rules_function(lowerCamelCase__ , lowerCamelCase__ )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ) -> Any:
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ) -> Optional[Any]:
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ) -> List[Any]:
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ) -> Dict:
'''simple docstring'''
A_ : List[str] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A_ : Any = lr_init - lr_end
A_ : Tuple = num_training_steps - num_warmup_steps
A_ : Dict = 1 - (current_step - num_warmup_steps) / decay_steps
A_ : Union[str, Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase :Optional[int] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ) -> Union[str, Any]:
'''simple docstring'''
A_ : Optional[int] = SchedulerType(lowerCamelCase__ )
A_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) | 703 |
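# Evaluating the linear-warmup schedule above at a few steps makes its shape
# concrete: the multiplier climbs linearly to 1.0 over the warmup steps, then
# decays linearly to 0 at num_training_steps. Standalone re-statement of the
# lambda (without the LambdaLR wrapper), for illustration:
def linear_schedule(current_step, num_warmup_steps=10, num_training_steps=100):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    return max(
        0.0,
        float(num_training_steps - current_step)
        / float(max(1, num_training_steps - num_warmup_steps)),
    )


for step in (0, 5, 10, 55, 100):
    print(step, round(linear_schedule(step), 3))  # 0.0 0.5 1.0 0.5 0.0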
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
A_ : List[Any] = word_bank or []
# create a table
A_ : int = len(lowerCamelCase__ ) + 1
A_ : list[list[list[str]]] = []
for _ in range(lowerCamelCase__ ):
table.append([] )
# seed value
A_ : Any = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCamelCase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCamelCase__ )] == word:
A_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
table[i + len(lowerCamelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCamelCase__ )]:
combination.reverse()
return table[len(lowerCamelCase__ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
) | 686 | 0 |
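# Tracing the table for a tiny input clarifies the dynamic programme: table[i]
# holds every way to build target[:i], and a word matching at position i
# extends each of those ways into table[i + len(word)].
#
#   all_construct("ab", ["a", "b", "ab"])
#   table[0] = [[]]                   seed: one way to build ""
#   table[1] = [["a"]]                "a" matches at position 0
#   table[2] = [["ab"], ["b", "a"]]   "ab" matches at 0; "b" extends table[1]
#
# After reversing each combination the call returns [["ab"], ["a", "b"]].
print(all_construct("ab", ["a", "b", "ab"]))  # reuses all_construct from above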
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=32 , lowercase=3 , lowercase=4 , lowercase=[10, 20, 30, 40] , lowercase=[2, 2, 3, 2] , lowercase=True , lowercase=True , lowercase=37 , lowercase="gelu" , lowercase=10 , lowercase=0.02 , lowercase=["stage2", "stage3", "stage4"] , lowercase=3 , lowercase=None , ):
A_ : Union[str, Any] = parent
A_ : Optional[int] = batch_size
A_ : List[Any] = image_size
A_ : Union[str, Any] = num_channels
A_ : List[Any] = num_stages
A_ : Optional[int] = hidden_sizes
A_ : int = depths
A_ : Optional[int] = is_training
A_ : Any = use_labels
A_ : Any = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : Tuple = type_sequence_label_size
A_ : Union[str, Any] = initializer_range
A_ : Tuple = out_features
A_ : Tuple = num_labels
A_ : Any = scope
A_ : int = num_stages
def _a (self ):
A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : List[str] = None
if self.use_labels:
A_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : List[Any] = self.get_config()
return config, pixel_values, labels
def _a (self ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _a (self ):
return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , num_labels=self.num_labels , )
def _a (self , lowercase , lowercase , lowercase ):
        A_ : List[Any] = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        A_ : int = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _a (self ):
A_ : Tuple = self.prepare_config_and_inputs()
        A_ : str = config_and_inputs
A_ : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
def _a (self ):
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37 )
def _a (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
return
def _a (self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _a (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _a (self ):
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _a (self ):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _a (self ):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _a (self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" )
def _a (self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a (self ):
pass
def _a (self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )
def _a (self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def _a (self ):
pass
@slow
def _a (self ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
    image = Image.open(filepath ).convert("""RGB""" )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest ( unittest.TestCase ):
def _a (self ):
        processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
        model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(torch_device )

        image = prepare_img()
        inputs = processor(images=image , return_tensors="""pt""" ).to(torch_device )

        with torch.no_grad():
            outputs = model(**inputs )

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor(
            [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 ) )
def _a (self ):
        processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
        model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(torch_device )

        image = prepare_img()
        inputs = processor(images=image , return_tensors="""pt""" ).to(torch_device )

        with torch.no_grad():
            outputs = model(**inputs )

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor(
            [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 ) ) | 704 |
'''simple docstring'''
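# classic stack-based matcher: push every opening bracket; on a closing bracket, pop and compare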
def is_balanced( s ):
    '''simple docstring'''
    stack = []
    open_brackets = set({"""(""", """[""", """{"""} )
    closed_brackets = set({""")""", """]""", """}"""} )
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}

    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )

        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack ) == 0
def main( ):
    '''simple docstring'''
    s = input("""Enter sequence of brackets: """ )
    if is_balanced(s ):
        print(s , """is balanced""" )
    else:
        print(s , """is not balanced""" )
if __name__ == "__main__":
main() | 686 | 0 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic( ):
    '''simple docstring'''
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["""rouge2""", """rougeL"""] )
    assert isinstance(no_aggregation , defaultdict )
    no_aggregation_just_ra = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def test_newline_cnn_improvement( ):
    '''simple docstring'''
    k = "rougeLsum"
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics( ):
    '''simple docstring'''
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=k )
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=k )
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep( ):
    '''simple docstring'''
    pred = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
    tgt = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
    assert calculate_rouge(pred , tgt , newline_sep=True ) == calculate_rouge(pred , tgt , newline_sep=False )
def test_pegasus_newline( ):
    '''simple docstring'''
    pred = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
    tgt = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=["""rougeLsum"""] , newline_sep=False )["rougeLsum"]
    new_score = calculate_rouge(pred , tgt , rouge_keys=["""rougeLsum"""] )["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli( ):
    '''simple docstring'''
    data_dir = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
    metrics = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
    assert isinstance(metrics , dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict ) | 705 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader ( AbstractDatasetReader ):
    def __init__(self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , field = None , num_proc = None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def _a (self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter :
    def __init__(self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs
    def _a (self ):
        _ = self.to_json_kwargs.pop("""path_or_buf""" , None )
        orient = self.to_json_kwargs.pop("""orient""" , """records""" )
        lines = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
        index = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
        compression = self.to_json_kwargs.pop("""compression""" , None )

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )

        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , """wb""" , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    """ was passed. Please provide a local path instead.""" )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _a (self , args ):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith("""\n""" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _a (self , file_obj , orient , lines , index , **to_json_kwargs , ):
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_str )
        else:
            num_rows, batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                    written += file_obj.write(json_str )
return written | 686 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 5_1_2,
'''roberta-large''': 5_1_2,
'''roberta-large-mnli''': 5_1_2,
'''distilroberta-base''': 5_1_2,
'''roberta-base-openai-detector''': 5_1_2,
'''roberta-large-openai-detector''': 5_1_2,
}
class _lowerCAmelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer
def __init__(self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ , **UpperCAmelCase_ , )
A_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
A_ : Tuple = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
A_ : Optional[Any] = add_prefix_space
A_ : List[str] = pre_tok_class(**UpperCAmelCase_ )
A_ : str = add_prefix_space
A_ : Optional[int] = 'post_processor'
A_ : Dict = getattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : Optional[int] = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : Union[str, Any] = False
if state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
A_ : int = add_prefix_space
A_ : List[str] = True
if state.get("""trim_offsets""" , UpperCAmelCase_ ) != trim_offsets:
A_ : Tuple = trim_offsets
A_ : List[str] = True
if changes_to_apply:
A_ : Tuple = getattr(UpperCAmelCase_ , state.pop("""type""" ) )
A_ : Any = component_class(**UpperCAmelCase_ )
setattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
@property
    def mask_token(self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token(self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _a (self , *args , **kwargs ):
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args , **kwargs )
    def _a (self , *args , **kwargs ):
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args , **kwargs )
    def _a (self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _a (self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def _a (self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0] | 706 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
BLIP_TEST_FILE = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE )
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE )
        expected_bert_mapping = {"""BertModelTest""": """BertModelTester"""}
        expected_blip_mapping = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , expected_blip_mapping )
def _a (self ):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE )
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE )
        expected_bert_mapping = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
        expected_blip_mapping = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , expected_blip_mapping )
def _a (self ):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE )
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE )
        expected_bert_mapping = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
        expected_blip_mapping = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , expected_blip_mapping ) | 686 | 0 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowerCAmelCase ( AbstractFileSystem ):
    root_marker = ''
    protocol = 'hf-legacy'  # "hf://"" is reserved for hffs
    def __init__(self , repo_info = None , token = None , **kwargs , ):
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _a (self ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    'name': hf_file.rfilename,
                    'size': None,
                    'type': 'file',
                }
                self.dir_cache.update(
                    {
                        str(d ): {"""name""": str(d ), """size""": None, """type""": """directory"""}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
def _a (self , lowercase , lowercase = "rb" , **lowercase , ):
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}' )
A_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
    def _a (self , path , **kwargs ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def _a (self , path , detail=False , **kwargs ):
        self._get_dirs()
        path = PurePosixPath(path.strip("""/""" ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/""" ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out )
| 707 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
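# consumed by _LazyModule at the bottom of the file: these submodules are imported only on first attribute access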
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
'''simple docstring'''
import torch
from torch import nn
class _lowerCAmelCase ( nn.Module ):
    def __init__(self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
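        # the head softmax covers the shortlist (most frequent tokens) plus one routing entry per tail cluster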
if self.n_clusters > 0:
A_ : Any = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
A_ : Any = nn.Parameter(torch.zeros(self.n_clusters ) )
A_ : int = nn.ModuleList()
A_ : str = nn.ParameterList()
if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )

                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )

        self.keep_order = keep_order
    def _a (self , hidden , weight , bias , proj ):
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias

        return logit
    def _a (self , hidden , labels=None , keep_order=False ):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
else:
# construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )

                weights.append(weight_i )
                biases.append(bias_i )

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )

            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
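            # head tokens read their log-prob straight from the head softmax; tail tokens combine the
            # head's cluster routing log-prob with the within-cluster log-prob computed below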
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def _a (self , hidden ):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )

                weights.append(weight_i )
                biases.append(bias_i )

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )

            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i
return out | 708 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present( transformers_path ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''') | 686 | 0 |
'''simple docstring'''
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'n_samples': 6_4,
'horizon': 3_2,
'num_inference_steps': 2_0,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
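# planner hyper-parameters for the value-guided diffusion policy; "horizon" is the number of
# future steps planned per call and "n_guide_steps" the value-gradient updates per denoising step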
if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        '''bglick13/hopper-medium-v2-value-function-hor32''',
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_0_0_0
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=3_2)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}") | 709 |
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}
def _calculate( days , absent , late ):
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
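# Project Euler 191: solution(30) counts the 30-day attendance strings that never contain
# three consecutive late days and contain at most one absence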
def solution( days = 30 ):
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution()) | 686 | 0 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : Optional[Any] = "segformer"
    def __init__(self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
return 12 | 710 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class SchedulerType ( Enum ):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule( optimizer , last_epoch = -1 ):
    '''simple docstring'''
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup( optimizer , num_warmup_steps , last_epoch = -1 ):
    '''simple docstring'''

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule( optimizer , step_rules , last_epoch = -1 ):
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        steps, value = rule_str.split(""":""" )
        steps = int(steps )
        value = float(value )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )

    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
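# e.g. step_rules="1:10,0.1:20,0.01:30,0.005" keeps the base lr (x1) until step 10, then x0.1 until
# step 20, x0.01 until step 30, and x0.005 for all remaining steps (illustrative values)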
def get_linear_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    '''simple docstring'''

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    '''simple docstring'''

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    '''simple docstring'''

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    '''simple docstring'''
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    '''simple docstring'''
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )

    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch ) | 686 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 1_2_8}
class _lowerCAmelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer
def __init__(self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(
__A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
A_ : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
A_ : Optional[Any] = getattr(__A , pre_tok_state.pop("""type""" ) )
A_ : List[Any] = add_prefix_space
A_ : Optional[Any] = pre_tok_class(**__A )
A_ : Any = add_prefix_space
A_ : Any = "post_processor"
A_ : Optional[Any] = getattr(self.backend_tokenizer , __A , __A )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : List[Any] = tuple(state["""sep"""] )
if "cls" in state:
A_ : Optional[int] = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" , __A ) != add_prefix_space:
A_ : List[str] = add_prefix_space
A_ : int = True
if state.get("""trim_offsets""" , __A ) != trim_offsets:
A_ : List[str] = trim_offsets
A_ : Dict = True
if changes_to_apply:
A_ : Tuple = getattr(__A , state.pop("""type""" ) )
A_ : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer , __A , __A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token(self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _a (self , *args , **kwargs ):
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args , **kwargs )
    def _a (self , *args , **kwargs ):
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args , **kwargs )
    def _a (self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _a (self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _a (self , token_ids_0 , token_ids_1 = None ):
        return token_ids_0 + [self.eos_token_id]
    def _a (self , conversation ):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(""" """ + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )

        full_string = """ """.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 711 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''')
lowerCamelCase :int = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
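# the "*" in these keys acts as a layer-index wildcard that the conversion logic resolves
# when matching checkpoint state-dict entries against these target names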
lowerCamelCase :List[str] = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
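# MAPPING_24K also covers the 32 kHz MusicGen compression checkpoints; the 48 kHz model
# additionally needs the ".norm" entries because it uses time group norm (see
# `norm_type = "time_group_norm"` in convert_checkpoint below) instead of weight norm.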
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the (sub)module of `hf_pointer` addressed by the dotted `key` path."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
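# Illustrative call (hypothetical key/tensor): set_recursively(model, "encoder.layers.0.conv",
# value, "encoder.model.0.conv.conv.weight", "weight") walks model.encoder.layers[0].conv and
# copies `value` into its .weight after the shape check above.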
def should_ignore(name, ignore_keys):
    """Return True if `name` matches one of the (possibly wildcarded) `ignore_keys` patterns."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
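# Pattern semantics (illustrative): "quantizer.*" ignores every name starting with
# "quantizer.", "encoder.*.bias" ignores names containing both "encoder" and "bias",
# and any other key is treated as a plain substring match.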
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every tensor of the original checkpoint onto the HF model, collecting unmatched names."""
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original EnCodec checkpoint to the transformers format and optionally push it to the hub."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
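# Example invocation (hypothetical script name and local paths):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz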
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 686 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
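# This script converts an original mLUKE checkpoint into the Hugging Face LukeForMaskedLM
# format: it extends the tokenizer with <ent>/<ent2> markers, adds a [MASK2] entity, and
# verifies the converted model on a couple of reference predictions.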
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    hf_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            hf_state_dict["luke." + key] = state_dict[key]
        else:
            hf_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(hf_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
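# Each line of the original entity vocab file is a JSON object of the form (illustrative):
#   {"id": 12, "entities": [["Japan", "en"], ["日本", "ja"]]}
# i.e. one entity id shared by several (name, language) aliases.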
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 712 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"
    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
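    # Minimal usage sketch: BeitConfig() reproduces the defaults above, and individual
    # hyper-parameters can be overridden, e.g. BeitConfig(image_size=384, use_relative_position_bias=True).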
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation(self):
return 1E-4 | 686 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected output height/width after "shortest edge" resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
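# get_expected_values mirrors the "shortest edge" resizing contract: the smaller image side
# is scaled to size["shortest_edge"] while the aspect ratio is preserved, and for batched
# inputs the expected shape is the per-batch maximum height/width (padding is enabled).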
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
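    # The three call tests above exercise the same resize/pad contract for PIL, NumPy and
    # PyTorch inputs; only the container type of the test images changes.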
@slow
    def test_call_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) | 713 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
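# Each entry records where to fetch the original checkpoint plus the audio geometry
# (sample_rate in Hz, sample_size in samples) used to configure the diffusers UNet1DModel.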
def alpha_sigma_to_t(alpha, sigma):
    """Convert the (alpha, sigma) noise-schedule pair into a diffusion timestep."""
    return torch.atan2(sigma, alpha) / math.pi * 2
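# (alpha, sigma) parametrise the noise level on a quarter circle (alpha^2 + sigma^2 = 1),
# so atan2(sigma, alpha) * 2 / pi maps the pair back to a diffusion time t in [0, 1].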
def get_crash_schedule(t):
    """The "crash" schedule used by v-diffusion audio models."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
DOWN_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
main(args) | 686 | 0 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = '''Usage of script: script_name <size_of_canvas:int>'''
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size):
    """Create an all-dead square canvas of the given side length."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas):
    """Randomly seed the canvas with live/dead cells in place."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas):
    """Advance the canvas by one generation and return the new canvas as a list."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt, neighbours):
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
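# These are Conway's Game of Life rules: a live cell survives with 2 or 3 live neighbours
# and dies otherwise; a dead cell becomes alive with exactly 3 live neighbours.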
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
pass | 714 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes, trials, prob):
    """Probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
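# Worked example: binomial_distribution(2, 4, 0.75)
#   = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375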
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 0 |
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)
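    # Illustrative round-trip (short names depend on DEFAULTS): with
    # DEFAULTS = {"learning_rate": 0.1, "batch_size": 8},
    # shortname({"learning_rate": 0.3, "batch_size": 8}) could yield "hp_lr0.3",
    # and parse_repr("hp_lr0.3") recovers the full parameter dict.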
    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
return parameters | 715 |
'''simple docstring'''
import re
def a ( lowerCamelCase__ ):
'''simple docstring'''
if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
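# Example: "ATCG" -> "TAGC" (A<->T and C<->G are swapped via str.translate).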
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """The predicted CLIP image embedding produced by the prior."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
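    # Layout note: at each denoising step the (projected) text embedding, the timestep
    # embedding, the noisy CLIP image embedding and a learned "prd" token are concatenated
    # into one sequence before the causally-masked transformer blocks (see forward below).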
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
def _a (self , lowercase , lowercase , lowercase , lowercase = None , lowercase = None , lowercase = True , ):
A_ : str = hidden_states.shape[0]
A_ : int = timestep
if not torch.is_tensor(_A ):
A_ : str = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
A_ : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : Optional[int] = timesteps * torch.ones(_A , dtype=timesteps.dtype , device=timesteps.device )
A_ : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
A_ : Any = timesteps_projected.to(dtype=self.dtype )
A_ : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
A_ : int = self.embedding_proj_norm(_A )
A_ : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
A_ : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
A_ : Any = self.proj_in(_A )
A_ : Dict = self.positional_embedding.to(hidden_states.dtype )
A_ : List[Any] = []
A_ : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
A_ : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
A_ : Any = hidden_states[:, None, :]
A_ : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
A_ : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A , -1 , -1 )
additional_embeds.append(_A )
A_ : List[str] = torch.cat(
_A , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
A_ : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
A_ : Any = F.pad(
_A , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
A_ : int = hidden_states + positional_embeddings
if attention_mask is not None:
A_ : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
A_ : Union[str, Any] = F.pad(_A , (0, self.additional_embeddings) , value=0.0 )
A_ : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
A_ : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
A_ : Any = self.norm_in(_A )
for block in self.transformer_blocks:
A_ : int = block(_A , attention_mask=_A )
A_ : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
A_ : Optional[int] = hidden_states[:, -1]
else:
A_ : Any = hidden_states[:, additional_embeddings_len:]
A_ : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def _a (self , lowercase ):
A_ : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents | 716 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(lowerCamelCase__ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(lowerCamelCase__ ):
http_head("""https://huggingface.co""" ) | 686 | 0 |
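
A minimal standalone sketch of the causal-mask pattern used in the prior-transformer block above: fill a square matrix with a large negative value, keep only the strict upper triangle with triu_(1), and add it to the attention scores before softmax. Only the -10000.0 fill value and the triu_(1) call mirror the snippet; every other name and size here is illustrative.

import torch

def build_causal_bias(seq_len: int, neg: float = -10000.0) -> torch.Tensor:
    # Fill with a large negative value, then zero everything on or below
    # the diagonal: triu_(1) keeps only the strict upper triangle.
    bias = torch.full((seq_len, seq_len), neg)
    bias.triu_(1)
    return bias[None, ...]  # broadcastable over the batch dimension

scores = torch.randn(1, 4, 4)  # hypothetical attention scores
probs = (scores + build_causal_bias(4)).softmax(dim=-1)
print(probs)  # each row puts ~zero probability on later (future) positions
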
'''simple docstring'''
from math import sqrt
def a ( lowerCamelCase__ = 1_00_00_00 ):
'''simple docstring'''
A_ : int = 0
A_ : int = 0
A_ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCamelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }") | 717 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCamelCase :Any = re.compile(R'''\s+''')
def a ( lowerCamelCase__ ):
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def a ( lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""]
A_ : Optional[int] = example["""content"""].splitlines()
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ):
'''simple docstring'''
A_ : Any = ["""unit tests""", """test file""", """configuration file"""]
A_ : List[str] = example["""content"""].splitlines()
A_ : str = 0
A_ : Union[str, Any] = 0
# first test
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : List[Any] = example["""content"""].count("""\n""" )
A_ : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = ["""def """, """class """, """for """, """while """]
A_ : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a ( lowerCamelCase__ , lowerCamelCase__=4 ):
'''simple docstring'''
A_ : Tuple = example["""content"""].splitlines()
A_ : int = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""]
A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ )
return {"ratio": ratio}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = {}
results.update(get_hash(lowerCamelCase__ ) )
results.update(line_stats(lowerCamelCase__ ) )
results.update(alpha_stats(lowerCamelCase__ ) )
results.update(char_token_ratio(lowerCamelCase__ ) )
results.update(is_autogenerated(lowerCamelCase__ ) )
results.update(is_config_or_test(lowerCamelCase__ ) )
results.update(has_no_keywords(lowerCamelCase__ ) )
results.update(has_few_assignments(lowerCamelCase__ ) )
return results
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a ( lowerCamelCase__ ):
'''simple docstring'''
with open(lowerCamelCase__ , """rb""" ) as f_in:
with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
os.unlink(lowerCamelCase__ )
# Settings
lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments)
lowerCamelCase :Tuple = parser.parse_args()
if args.num_workers is None:
lowerCamelCase :Tuple = multiprocessing.cpu_count()
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCamelCase :List[Any] = time.time()
lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
lowerCamelCase :int = time.time()
lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
lowerCamelCase :int = set(ds.unique('''hash'''))
lowerCamelCase :List[str] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
lowerCamelCase :Dict = time.time()
lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCamelCase :List[str] = time.time()
lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
lowerCamelCase :int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
lowerCamelCase :Tuple = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
lowerCamelCase :Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json")
lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 0 |
'''simple docstring'''
from math import pow
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
A_ : Dict = int(pow(a_ , a_ ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
A_ : List[str] = backtrack(
a_ , a_ , current_number + 1 , a_ , a_ )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
A_ : Tuple = backtrack(
a_ , a_ , current_number + 1 , a_ , a_ )
return current_sum, solutions_count
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(a_ , a_ , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod() | 718 |
'''simple docstring'''
import pytest
lowerCamelCase :Optional[Any] = '''__dummy_dataset1__'''
lowerCamelCase :List[Any] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = dataset_loading_script_name
A_ : int = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase__ )
A_ : Tuple = script_dir / f'{script_name}.py'
with open(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ )
return str(lowerCamelCase__ ) | 686 | 0 |
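
The power-sum backtracking above can be restated compactly: count the ways to write needed_sum as a sum of power-th powers of strictly increasing bases. This is a sketch of the same counting idea, not the file's exact signature.

def count_power_sums(needed_sum: int, power: int) -> int:
    def backtrack(remaining: int, start: int) -> int:
        if remaining == 0:
            return 1  # exact representation found
        count, base = 0, start
        while base ** power <= remaining:
            # Use `base`, then allow only strictly larger bases afterwards,
            # so every term in the sum is distinct.
            count += backtrack(remaining - base ** power, base + 1)
            base += 1
        return count

    return backtrack(needed_sum, 1)

print(count_power_sums(100, 2))  # 3: 10^2, 6^2+8^2, 1^2+3^2+4^2+5^2+7^2
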
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( enum.Enum ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
__SCREAMING_SNAKE_CASE : Dict = 1
@add_end_docstrings(_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'generated'
def __init__(self , *lowercase , **lowercase ):
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _a (self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , **lowercase , ):
A_ : Union[str, Any] = {}
if truncation is not None:
A_ : Tuple = truncation
A_ : Dict = generate_kwargs
A_ : Tuple = {}
if return_tensors is not None and return_type is None:
A_ : str = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
A_ : Optional[int] = return_type
if clean_up_tokenization_spaces is not None:
A_ : str = clean_up_tokenization_spaces
if stop_sequence is not None:
A_ : str = self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
A_ : int = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _a (self , lowercase , lowercase , lowercase ):
return True
def _a (self , *lowercase , lowercase ):
A_ : Dict = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
A_ : Tuple = ([prefix + arg for arg in args[0]],)
A_ : str = True
elif isinstance(args[0] , __UpperCamelCase ):
A_ : Any = (prefix + args[0],)
A_ : List[Any] = False
else:
raise ValueError(
F' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`' )
A_ : Any = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__(self , *lowercase , **lowercase ):
A_ : Any = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
if (
isinstance(args[0] , __UpperCamelCase )
and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _a (self , lowercase , lowercase=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase ):
A_ : str = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase )
return inputs
def _a (self , lowercase , **lowercase ):
if self.framework == "pt":
A_, A_ : Optional[Any] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
A_, A_ : str = tf.shape(model_inputs["""input_ids"""] ).numpy()
A_ : Optional[Any] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
A_ : Union[str, Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__UpperCamelCase , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
A_ : List[str] = self.model.generate(**__UpperCamelCase , **__UpperCamelCase )
A_ : Tuple = output_ids.shape[0]
if self.framework == "pt":
A_ : Dict = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
A_ : List[str] = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _a (self , lowercase , lowercase=ReturnType.TEXT , lowercase=False ):
A_ : Union[str, Any] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
A_ : str = {F'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
A_ : Dict = {
F'{self.return_name}_text': self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'summary'
def __call__(self , *lowercase , **lowercase ):
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def _a (self , lowercase , lowercase , lowercase ):
if max_length < min_length:
logger.warning(F'Your min_length={min_length} must be inferior than your max_length={max_length}.' )
if input_length < max_length:
logger.warning(
F'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
F'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' )
@add_end_docstrings(_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = 'translation'
def _a (self , lowercase , lowercase , lowercase ):
if input_length > 0.9 * max_length:
logger.warning(
F'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def _a (self , *lowercase , lowercase=TruncationStrategy.DO_NOT_TRUNCATE , lowercase=None , lowercase=None ):
if getattr(self.tokenizer , """_build_translation_inputs""" , __UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase )
def _a (self , lowercase=None , lowercase=None , **lowercase ):
A_, A_, A_ : Optional[Any] = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
A_ : Optional[int] = src_lang
if tgt_lang is not None:
A_ : Dict = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
A_ : List[str] = kwargs.get("""task""" , self.task )
A_ : Optional[Any] = task.split("""_""" )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
A_ : str = items[1]
A_ : int = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__(self , *lowercase , **lowercase ):
return super().__call__(*__UpperCamelCase , **__UpperCamelCase ) | 719 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCamelCase :int = datasets.load_iris()
lowerCamelCase :str = np.array(data['''data'''])
lowerCamelCase :Dict = np.array(data['''target'''])
lowerCamelCase :Union[str, Any] = data['''target_names''']
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
A_ : List[str] = []
for data_point in data:
A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 0 |
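
The classifier above is plain k-nearest-neighbours: rank training points by Euclidean distance to the query and take a majority vote over the k closest labels. A self-contained sketch with made-up toy data:

from collections import Counter

import numpy as np

def knn_predict(train_x, train_y, query, k=3):
    # Euclidean distance from the query to every training point.
    dists = np.linalg.norm(np.asarray(train_x) - np.asarray(query), axis=1)
    nearest = np.argsort(dists)[:k]    # indices of the k closest points
    votes = Counter(np.asarray(train_y)[nearest])
    return votes.most_common(1)[0][0]  # majority label among neighbours

X = [[0.0, 0.0], [0.1, 0.2], [1.0, 1.0], [0.9, 1.1]]
y = ["a", "a", "b", "b"]
print(knn_predict(X, y, [0.05, 0.1]))  # -> 'a'
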
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
lowerCamelCase :Any = Path(__file__).parent / """model_card_template.md"""
lowerCamelCase :int = uuida().hex
lowerCamelCase :List[str] = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCamelCase :List[Any] = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCamelCase :Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def a ( lowerCamelCase__ = None ):
'''simple docstring'''
A_ : int = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'; torch/{_torch_version}'
if is_flax_available():
ua += f'; jax/{_jax_version}'
ua += f'; flax/{_flax_version}'
if is_onnx_available():
ua += f'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
ua += "; " + user_agent
return ua
def a ( lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None ):
'''simple docstring'''
if token is None:
A_ : List[Any] = HfFolder.get_token()
if organization is None:
A_ : Tuple = whoami(lowerCamelCase__ )["""name"""]
return f'{username}/{model_id}'
else:
return f'{organization}/{model_id}'
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
"""Modelcard rendering is based on Jinja templates."""
""" Please make sure to have `jinja` installed before using `create_model_card`."""
""" To install it, please run `pip install Jinja2`.""" )
if hasattr(lowerCamelCase__ , """local_rank""" ) and args.local_rank not in [-1, 0]:
return
A_ : Optional[int] = args.hub_token if hasattr(lowerCamelCase__ , """hub_token""" ) else None
A_ : str = get_full_repo_name(lowerCamelCase__ , token=lowerCamelCase__ )
A_ : Dict = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=lowerCamelCase__ , model_name=lowerCamelCase__ , repo_name=lowerCamelCase__ , dataset_name=args.dataset_name if hasattr(lowerCamelCase__ , """dataset_name""" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(lowerCamelCase__ , """gradient_accumulation_steps""" ) else None
) , adam_betaa=args.adam_betaa if hasattr(lowerCamelCase__ , """adam_beta1""" ) else None , adam_betaa=args.adam_betaa if hasattr(lowerCamelCase__ , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(lowerCamelCase__ , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(lowerCamelCase__ , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(lowerCamelCase__ , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(lowerCamelCase__ , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(lowerCamelCase__ , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(lowerCamelCase__ , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(lowerCamelCase__ , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , )
A_ : str = os.path.join(args.output_dir , """README.md""" )
model_card.save(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
A_ : Any = str(Path(lowerCamelCase__ ).as_posix() )
A_ : Dict = re.search(r"""snapshots/([^/]+)/""" , lowerCamelCase__ )
if search is None:
return None
A_ : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(lowerCamelCase__ ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCamelCase :List[str] = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
lowerCamelCase :Optional[Any] = os.path.join(hf_cache_home, '''diffusers''')
def a ( lowerCamelCase__ = None , lowerCamelCase__ = None ):
'''simple docstring'''
if new_cache_dir is None:
A_ : List[str] = DIFFUSERS_CACHE
if old_cache_dir is None:
A_ : str = old_diffusers_cache
A_ : Union[str, Any] = Path(lowerCamelCase__ ).expanduser()
A_ : List[Any] = Path(lowerCamelCase__ ).expanduser()
for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
A_ : Optional[int] = new_cache_dir / old_blob_path.relative_to(lowerCamelCase__ )
new_blob_path.parent.mkdir(parents=lowerCamelCase__ , exist_ok=lowerCamelCase__ )
os.replace(lowerCamelCase__ , lowerCamelCase__ )
try:
os.symlink(lowerCamelCase__ , lowerCamelCase__ )
except OSError:
logger.warning(
"""Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCamelCase :str = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
lowerCamelCase :Any = 0
else:
with open(cache_version_file) as f:
try:
lowerCamelCase :Optional[int] = int(f.read())
except ValueError:
lowerCamelCase :Optional[Any] = 0
if cache_version < 1:
lowerCamelCase :Union[str, Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
lowerCamelCase :Optional[Any] = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'''the directory exists and can be written to.'''
)
def a ( lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
if variant is not None:
A_ : int = weights_name.split(""".""" )
A_ : str = splits[:-1] + [variant] + splits[-1:]
A_ : List[str] = """.""".join(lowerCamelCase__ )
return weights_name
def a ( lowerCamelCase__ , *,
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , ):
'''simple docstring'''
A_ : List[Any] = str(lowerCamelCase__ )
if os.path.isfile(lowerCamelCase__ ):
return pretrained_model_name_or_path
elif os.path.isdir(lowerCamelCase__ ):
if os.path.isfile(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) ):
# Load from a PyTorch checkpoint
A_ : Union[str, Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ):
A_ : List[Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return model_file
else:
raise EnvironmentError(
f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(lowerCamelCase__ ).base_version ) >= version.parse("""0.20.0""" )
):
try:
A_ : Tuple = hf_hub_download(
lowerCamelCase__ , filename=_add_variant(lowerCamelCase__ , lowerCamelCase__ ) , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , proxies=lowerCamelCase__ , resume_download=lowerCamelCase__ , local_files_only=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , user_agent=lowerCamelCase__ , subfolder=lowerCamelCase__ , revision=revision or commit_hash , )
warnings.warn(
f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , lowerCamelCase__ , )
return model_file
except: # noqa: E722
warnings.warn(
f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(lowerCamelCase__ , lowerCamelCase__ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(lowerCamelCase__ , lowerCamelCase__ )}\' so that the correct variant file can be added.' , lowerCamelCase__ , )
try:
# 2. Load model file as usual
A_ : Optional[Any] = hf_hub_download(
lowerCamelCase__ , filename=lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , proxies=lowerCamelCase__ , resume_download=lowerCamelCase__ , local_files_only=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , user_agent=lowerCamelCase__ , subfolder=lowerCamelCase__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"""listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"""this model name. Check the model page at """
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.""" )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"""\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. """
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' ) | 720 |
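
The variant-handling helper near the top of the file above inserts a variant tag just before the file extension. Restated with plain names as a sketch (the example filename is only illustrative):

from typing import Optional

def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    # "diffusion_pytorch_model.bin" + "fp16"
    #   -> "diffusion_pytorch_model.fp16.bin"
    if variant is None:
        return weights_name
    parts = weights_name.split(".")
    return ".".join(parts[:-1] + [variant] + parts[-1:])

print(add_variant("diffusion_pytorch_model.bin", "fp16"))
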
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Tuple = [sequences]
A_ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : int = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A_ : str = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 0 |
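
The postprocessing above reduces NLI logits to per-label scores; in the single-label branch it softmaxes the entailment logit across all candidates. A NumPy sketch with made-up logits and labels:

import numpy as np

# Hypothetical NLI logits, one row per (sequence, hypothesis) pair,
# columns ordered [contradiction, neutral, entailment].
logits = np.array([[2.0, 0.1, -1.0],
                   [0.0, 0.3, 1.5],
                   [-0.5, 0.2, 3.0]])

entailment = logits[:, -1]                              # entailment column
scores = np.exp(entailment) / np.exp(entailment).sum()  # softmax over labels
labels = ["sports", "politics", "science"]              # illustrative labels
order = np.argsort(scores)[::-1]                        # best label first
print([(labels[i], round(float(scores[i]), 3)) for i in order])
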
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
lowerCamelCase :List[str] = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase :Tuple = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase :Optional[Any] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
lowerCamelCase :List[str] = model.state_dict()
lowerCamelCase :Any = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase :List[Any] = state_dict[F"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
lowerCamelCase :Any = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"]
lowerCamelCase :Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
lowerCamelCase :int = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
lowerCamelCase :List[str] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
lowerCamelCase :Optional[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
lowerCamelCase :str = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
lowerCamelCase :Tuple = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
lowerCamelCase :Optional[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
lowerCamelCase :Tuple = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
lowerCamelCase :Any = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
lowerCamelCase :List[str] = state_dict['''cls.predictions.decoder.weight''']
lowerCamelCase :Tuple = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase :List[Any] = state_dict[F"cls.predictions.transform.dense.{w}"]
lowerCamelCase :List[str] = state_dict[F"cls.predictions.transform.LayerNorm.{w}"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint) | 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'yolos'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = image_size
A_ : str = patch_size
A_ : int = num_channels
A_ : Optional[int] = qkv_bias
A_ : List[Any] = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : int = auxiliary_loss
# Hungarian matcher
A_ : int = class_cost
A_ : List[Any] = bbox_cost
A_ : Optional[int] = giou_cost
# Loss coefficients
A_ : Any = bbox_loss_coefficient
A_ : List[Any] = giou_loss_coefficient
A_ : str = eos_coefficient
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
return 12 | 686 | 0 |
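
The distillation script above copies teacher layers 0, 2, 4, 7, 9, 11 onto consecutive student indices by rewriting state-dict keys. The same remapping can be sketched generically; the BERT-style key pattern here is an assumption:

import re

def select_layers(state_dict, teacher_layers, prefix="bert"):
    # e.g. "bert.encoder.layer.7.*" -> "bert.encoder.layer.3.*" when
    # teacher layer 7 is the fourth layer kept for the student.
    index = {old: new for new, old in enumerate(teacher_layers)}
    pattern = re.compile(rf"^{prefix}\.encoder\.layer\.(\d+)\.(.+)$")
    student = {}
    for key, value in state_dict.items():
        match = pattern.match(key)
        if match is None:
            student[key] = value               # embeddings, heads, etc.
            continue
        layer, rest = int(match.group(1)), match.group(2)
        if layer in index:                     # drop unselected layers
            student[f"{prefix}.encoder.layer.{index[layer]}.{rest}"] = value
    return student

toy = {f"bert.encoder.layer.{i}.attention.self.query.weight": i for i in range(12)}
print(sorted(select_layers(toy, [0, 2, 4, 7, 9, 11])))
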
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1_00 , ):
'''simple docstring'''
A_ : Tuple = x_start
A_ : Optional[Any] = fnc(snake_case__ )
A_ : Any = 0.0
for _ in range(snake_case__ ):
# Approximates curve as a sequence of linear lines and sums their length
A_ : List[Any] = (x_end - x_start) / steps + xa
A_ : str = fnc(snake_case__ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
A_ : Union[str, Any] = xa
A_ : Union[str, Any] = fxa
return length
if __name__ == "__main__":
def a ( lowerCamelCase__ ):
'''simple docstring'''
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
lowerCamelCase :List[str] = 1_0
while i <= 1_0_0_0_0_0:
print(F"With {i} steps: {line_length(f, -1_0, 1_0, i)}")
i *= 1_0 | 700 |
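
The file above estimates arc length by summing the hypotenuses of small secant segments. The same idea with descriptive names, as a sketch:

import math

def arc_length(f, x_start, x_end, steps=100_000):
    x0, y0 = x_start, f(x_start)
    total = 0.0
    for _ in range(steps):
        x1 = x0 + (x_end - x_start) / steps
        y1 = f(x1)
        total += math.hypot(x1 - x0, y1 - y0)  # one secant segment
        x0, y0 = x1, y1
    return total

# For sin(10x) on [-10, 10] the estimate approaches roughly 130
# as `steps` grows, matching the refinement loop printed above.
print(arc_length(lambda x: math.sin(10 * x), -10, 10))
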
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCamelCase :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This alignment problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. The relationship between perplexity and word error rate is often described empirically by a power law.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase :Optional[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _a (self , lowercase=None , lowercase=None , lowercase=False ):
if concatenate_texts:
return compute_measures(lowercase , lowercase )["wer"]
else:
A_ : List[Any] = 0
A_ : Optional[int] = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : Any = compute_measures(lowercase , lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 686 | 0 |
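
The WER formula in the docstring, (S + D + I) / N, is just a word-level Levenshtein distance divided by the reference length. A dependency-free sketch (the metric itself delegates to jiwer):

def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = minimum edits turning the first i reference words
    # into the first j hypothesis words.
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i  # i deletions
    for j in range(len(hyp) + 1):
        dp[0][j] = j  # j insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,          # deletion
                           dp[i][j - 1] + 1,          # insertion
                           dp[i - 1][j - 1] + cost)   # substitution/match
    return dp[len(ref)][len(hyp)] / len(ref)

print(word_error_rate("this is the reference", "this is the prediction"))  # 0.25
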
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = checkpoint
A_ : Tuple = {}
A_ : Any = vae_state_dict["""encoder.conv_in.weight"""]
A_ : Any = vae_state_dict["""encoder.conv_in.bias"""]
A_ : List[Any] = vae_state_dict["""encoder.conv_out.weight"""]
A_ : List[Any] = vae_state_dict["""encoder.conv_out.bias"""]
A_ : Tuple = vae_state_dict["""encoder.norm_out.weight"""]
A_ : Any = vae_state_dict["""encoder.norm_out.bias"""]
A_ : Any = vae_state_dict["""decoder.conv_in.weight"""]
A_ : Tuple = vae_state_dict["""decoder.conv_in.bias"""]
A_ : List[str] = vae_state_dict["""decoder.conv_out.weight"""]
A_ : Any = vae_state_dict["""decoder.conv_out.bias"""]
A_ : Optional[int] = vae_state_dict["""decoder.norm_out.weight"""]
A_ : Any = vae_state_dict["""decoder.norm_out.bias"""]
A_ : Optional[int] = vae_state_dict["""quant_conv.weight"""]
A_ : int = vae_state_dict["""quant_conv.bias"""]
A_ : Optional[int] = vae_state_dict["""post_quant_conv.weight"""]
A_ : Tuple = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
A_ : int = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
A_ : Dict = {
layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(__SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the decoder up blocks only
A_ : Dict = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
A_ : Tuple = {
layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(__SCREAMING_SNAKE_CASE )
}
for i in range(__SCREAMING_SNAKE_CASE ):
A_ : Any = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key]
if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
A_ : Optional[Any] = vae_state_dict.pop(
f'encoder.down.{i}.downsample.conv.weight' )
A_ : Optional[Any] = vae_state_dict.pop(
f'encoder.down.{i}.downsample.conv.bias' )
A_ : Dict = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = {"""old""": f'down.{i}.block', """new""": f'down_blocks.{i}.resnets'}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
A_ : Dict = [key for key in vae_state_dict if """encoder.mid.block""" in key]
A_ : Any = 2
for i in range(1 , num_mid_res_blocks + 1 ):
A_ : List[str] = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key]
A_ : List[Any] = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
A_ : Optional[int] = {"""old""": f'mid.block_{i}', """new""": f'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
A_ : List[Any] = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
A_ : Union[str, Any] = renew_vae_attention_paths(__SCREAMING_SNAKE_CASE )
A_ : Tuple = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
conv_attn_to_linear(__SCREAMING_SNAKE_CASE )
for i in range(__SCREAMING_SNAKE_CASE ):
A_ : int = num_up_blocks - 1 - i
A_ : Optional[int] = [
key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key
]
if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
A_ : Optional[int] = vae_state_dict[
f'decoder.up.{block_id}.upsample.conv.weight'
]
A_ : List[str] = vae_state_dict[
f'decoder.up.{block_id}.upsample.conv.bias'
]
A_ : int = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = {"""old""": f'up.{block_id}.block', """new""": f'up_blocks.{i}.resnets'}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
A_ : List[str] = [key for key in vae_state_dict if """decoder.mid.block""" in key]
A_ : Optional[int] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
A_ : Tuple = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key]
A_ : Optional[Any] = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = {"""old""": f'mid.block_{i}', """new""": f'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
A_ : str = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
A_ : Optional[int] = renew_vae_attention_paths(__SCREAMING_SNAKE_CASE )
A_ : int = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
conv_attn_to_linear(__SCREAMING_SNAKE_CASE )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    '''simple docstring'''
    # Only support V1
    r = requests.get(
        """https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)

    image_size = 512
    device = """cuda""" if torch.cuda.is_available() else """cpu"""

    if checkpoint_path.endswith("""safetensors""" ):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path , framework="""pt""" , device="""cpu""" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )["""state_dict"""]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )

    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path at which to save the converted diffusers VAE.''')
    args = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
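# Example invocation (hypothetical paths, shown for illustration only):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./converted_vae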
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
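        # CycleDiffusion edits `image` away from `source_prompt` and toward `prompt`;
        # the dummy inputs therefore carry both text conditions plus strength/eta controls.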
        inputs = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_cycle(self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_cycle_fp16(self ):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module , """half""" ):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
    def test_save_load_local(self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
    def test_inference_batch_single_identical(self ):
return super().test_inference_batch_single_identical()
@skip_mps
    def test_dict_tuple_outputs_equivalent(self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
    def test_save_load_optional_components(self ):
return super().test_save_load_optional_components()
@skip_mps
    def test_attention_slicing_forward_pass(self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
        init_image = init_image.resize((512, 512) )

        model_id = """CompVis/stable-diffusion-v1-4"""
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision="""fp16""" )

        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        source_prompt = """A black colored car"""
        prompt = """A blue colored car"""

        generator = torch.manual_seed(0 )

        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="""np""" , )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1
    def test_cycle_diffusion_pipeline(self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
        init_image = init_image.resize((512, 512) )

        model_id = """CompVis/stable-diffusion-v1-4"""
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None )

        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        source_prompt = """A black colored car"""
        prompt = """A blue colored car"""

        generator = torch.manual_seed(0 )

        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="""np""" , )
        image = output.images

        assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionSAGPipeline
__SCREAMING_SNAKE_CASE : List[Any] = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : str = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Tuple = False
    def get_dummy_components(self ):
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
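        # `sag_scale` controls the strength of self-attention guidance on top of the
        # usual classifier-free `guidance_scale`; a value of 0.0 disables SAG.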
        inputs = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
    def test_inference_batch_single_identical(self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_1(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )

        prompt = """."""
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def test_stable_diffusion_2(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )

        prompt = """."""
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def test_stable_diffusion_2_non_square(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )

        prompt = """."""
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
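        # shape-only assertion: this case exercises non-square (width=768, height=512) output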
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
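# Each block test below pins a 9-value slice of the block's output; the shared
# `test_output` helper (from UNetBlockTesterMixin) runs the block on a deterministic
# dummy input and compares the result against these expected values.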
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
        }
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
        }
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """in_channels""": 32,
            """temb_channels""": 128,
        }
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {"""in_channels""": 32, """out_channels""": 32}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {"""in_channels""": 32, """out_channels""": 32}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(lowercase )
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )

        config = self.get_config()
return config, pixel_values, labels
    def get_config(self ):
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
return config
    def create_and_check_model(self , config , pixel_values , labels ):
        model = TimesformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_video_classification(self , config , pixel_values , labels ):
        model = TimesformerForVideoClassification(config )
        model.to(torch_device )
        model.eval()

        result = model(pixel_values )

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , expected_shape )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[Any] = (
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Tuple = False
    def setUp(self ):
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
    def test_config(self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def _a (self ):
pass
    def test_model_common_attributes(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_video_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs(self ):
if not self.has_attentions:
pass
else:
A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Union[str, Any] = True
for model_class in self.all_model_classes:
A_ : List[Any] = self.model_tester.seq_length
A_ : Optional[int] = self.model_tester.num_frames
A_ : int = True
A_ : Optional[Any] = False
A_ : Optional[int] = True
A_ : Optional[int] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
A_ : str = model(**self._prepare_for_class(__A , __A ) )
A_ : Dict = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A_ : List[Any] = True
A_ : List[str] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
A_ : int = model(**self._prepare_for_class(__A , __A ) )
A_ : Dict = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
A_ : Dict = len(__A )
# Check attention is always last and order is fine
A_ : Union[str, Any] = True
A_ : List[str] = True
A_ : Optional[Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
A_ : str = model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 1 , len(__A ) )
A_ : str = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
    def test_hidden_states_output(self ):
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : Optional[int] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
A_ : str = model(**self._prepare_for_class(__A , __A ) )
A_ : Dict = outputs.hidden_states
A_ : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__A ) , __A )
A_ : Tuple = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : int = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(__A , __A , __A )
def prepare_video():
    '''simple docstring'''
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
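# The tests slice the first 8 frames of this clip (`video[:8]`), matching the 8-frame
# inputs the k400-finetuned checkpoint expects while keeping the forward pass cheap.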
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor(self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def test_inference_for_video_classification(self ):
        model = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
            torch_device )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8] , return_tensors="""pt""" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
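# Worked example: all_construct("aa", ["a", "aa"]) returns [["aa"], ["a", "a"]] --
# every decomposition of the target into bank words, not just one of them.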
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
    )
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    '''simple docstring'''
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}' )
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
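# Example invocation (hypothetical paths):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin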
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
'''simple docstring'''
def is_balanced(s: str) -> bool:
    '''simple docstring'''
    stack = []
    open_brackets = set({"""(""", """[""", """{"""} )
    closed_brackets = set({""")""", """]""", """}"""} )
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}

    for i in range(len(s) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    '''simple docstring'''
    s = input("""Enter sequence of brackets: """ )
    if is_balanced(s):
        print(s , """is balanced""" )
    else:
        print(s , """is not balanced""" )
if __name__ == "__main__":
    main()
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'ViltImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('BertTokenizer', 'BertTokenizerFast')
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )

        image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__(self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )

        return encoding
    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names(self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
    def feature_extractor_class(self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _a , )
return self.image_processor_class
@property
    def feature_extractor(self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _a , )
        return self.image_processor
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( __UpperCAmelCase ):
    def __init__(self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , field = None , num_proc = None , **kwargs ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read(self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
    def __init__(self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs
    def write(self ):
        _ = self.to_json_kwargs.pop("""path_or_buf""" , None )
        orient = self.to_json_kwargs.pop("""orient""" , """records""" )
        lines = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
        index = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
        compression = self.to_json_kwargs.pop("""compression""" , None )

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )

        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , """wb""" , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    """ was passed. Please provide a local path instead.""" )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _batch_json(self , args ):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith("""\n""" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
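    # Each batch is serialized independently, which lets `_write` below either loop
    # serially or fan the (offset, ...) work items out to a multiprocessing pool and
    # simply concatenate the encoded chunks.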
    def _write(self , file_obj , orient , lines , index , **to_json_kwargs ):
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_str )
        else:
            num_rows, batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                    written += file_obj.write(json_str )

        return written
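# Usage sketch against the public `datasets` API that routes through the writer class
# above (`to_json` is the documented entry point):
#   from datasets import Dataset
#   Dataset.from_dict({"a": [1, 2]}).to_json("out.jsonl", num_proc=2)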
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _lowerCAmelCase ( __a ):
__SCREAMING_SNAKE_CASE : Tuple = ['audio_values', 'audio_mask']
    def __init__(self , spectrogram_length=2048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44100 , hop_length_to_sampling_rate=86 , n_fft=2048 , padding_value=0.0 , **kwargs ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , ).T
    def _np_extract_fbank_features(self , waveform ):
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
def __call__(self , lowercase , lowercase = None , lowercase = True , lowercase = None , lowercase = False , lowercase = False , **lowercase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
F' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : str = isinstance(a_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
A_ : Optional[Any] = is_batched_numpy or (
isinstance(a_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A_ : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a_ , np.ndarray ):
A_ : int = np.asarray(a_ , dtype=np.floataa )
elif isinstance(a_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : List[str] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
A_ : Dict = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , a_ ):
A_ : str = [np.asarray(a_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
A_ : Any = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
A_ : Any = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
A_ : Optional[int] = np.array(a_ ).astype(np.floataa )
# convert into correct format for padding
A_ : Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
A_ : Optional[Any] = np.ones([len(a_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
A_ : Any = padded_audio_features * self.padding_value
for i in range(len(a_ ) ):
A_ : Optional[Any] = audio_features[i]
A_ : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
A_ : Dict = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
A_ : Optional[Any] = {"""audio_values""": padded_audio_features}
A_ : int = BatchFeature(data=a_ , tensor_type=a_ )
        return encoded_inputs
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
A_ : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
A_ : List[str] = get_model_to_test_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A_ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : List[Any] = get_model_to_tester_mapping(lowercase )
A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A_ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
        self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_00_00_00) -> int:
    '''simple docstring'''
    return sum(takewhile(lambda x: x < n , prime_generator() ) )
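# Sanity check: solution(10) == 17, since the primes below 10 are 2 + 3 + 5 + 7.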
if __name__ == "__main__":
print(F"{solution() = }")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_longt5"""] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_longt5"""] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
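# The _LazyModule indirection defers the heavy torch/flax imports until an attribute
# is actually accessed, e.g. `from transformers import LongT5Model` only triggers the
# torch branch of _import_structure at lookup time.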
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , ):
'''simple docstring'''
output_path.parent.mkdir(parents=UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCAmelCase__ , UpperCAmelCase__ , f=output_path.as_posix() , input_names=UpperCAmelCase__ , output_names=UpperCAmelCase__ , dynamic_axes=UpperCAmelCase__ , do_constant_folding=UpperCAmelCase__ , use_external_data_format=UpperCAmelCase__ , enable_onnx_checker=UpperCAmelCase__ , opset_version=UpperCAmelCase__ , )
else:
export(
UpperCAmelCase__ , UpperCAmelCase__ , f=output_path.as_posix() , input_names=UpperCAmelCase__ , output_names=UpperCAmelCase__ , dynamic_axes=UpperCAmelCase__ , do_constant_folding=UpperCAmelCase__ , opset_version=UpperCAmelCase__ , )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
'''simple docstring'''
    A_ : Dict = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
A_ : Tuple = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
A_ : Union[str, Any] = """cpu"""
A_ : List[str] = Path(UpperCAmelCase__ )
# VAE DECODER
A_ : Dict = AutoencoderKL.from_pretrained(model_path + """/vae""" )
A_ : Any = vae_decoder.config.latent_channels
# forward only through the decoder part
A_ : Optional[int] = vae_decoder.decode
onnx_export(
UpperCAmelCase__ , model_args=(
torch.randn(1 , UpperCAmelCase__ , 25 , 25 ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCAmelCase__ , )
del vae_decoder
if __name__ == "__main__":
lowerCamelCase :Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
lowerCamelCase :Dict = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''') | 708 |
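# Added usage sketch (hedged): after the converter above runs, the exported
# decoder can be served with onnxruntime. The directory layout matches the
# onnx_export call ("<output_path>/vae_decoder/model.onnx"); the 4 latent
# channels and 25x25 latent size are illustrative defaults, not guaranteed.
def _run_exported_decoder_example(model_dir):
    import numpy as np
    import onnxruntime as ort

    session = ort.InferenceSession(f"{model_dir}/vae_decoder/model.onnx")
    latents = np.random.randn(1, 4, 25, 25).astype(np.float32)
    # "latent_sample" is the first ordered input name used in the export.
    (image,) = session.run(None, {"latent_sample": latents})
    return image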
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCamelCase :Any = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def a ( lowerCamelCase__ ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
lowerCamelCase :List[Any] = parser.parse_args()
if args.check_lib:
lowerCamelCase :Union[str, Any] = importlib.import_module('''transformers''')
lowerCamelCase :Union[str, Any] = Path(transformers_module.__file__).parent
else:
lowerCamelCase :List[str] = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''') | 686 | 0 |
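# Added usage note (the file name "check_build.py" is an assumption):
#   python check_build.py --check_lib   # inspect the installed transformers
#   python check_build.py               # inspect ./build/lib/transformers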
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _lowerCAmelCase ( a__ ):
__SCREAMING_SNAKE_CASE : torch.FloatTensor
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase=3 , lowercase=3 , lowercase=("DownEncoderBlock2D",) , lowercase=(64,) , lowercase=2 , lowercase=32 , lowercase="silu" , lowercase=True , ):
super().__init__()
A_ : Tuple = layers_per_block
A_ : List[str] = torch.nn.Convad(
lowerCAmelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ : str = None
A_ : Any = nn.ModuleList([] )
# down
A_ : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase__ ):
A_ : Dict = output_channel
A_ : str = block_out_channels[i]
A_ : int = i == len(lowerCAmelCase__ ) - 1
A_ : List[Any] = get_down_block(
lowerCAmelCase__ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase__ , resnet_groups=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
self.down_blocks.append(lowerCAmelCase__ )
# mid
A_ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
# out
A_ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase__ , eps=1E-6 )
A_ : Dict = nn.SiLU()
A_ : Union[str, Any] = 2 * out_channels if double_z else out_channels
A_ : List[Any] = nn.Convad(block_out_channels[-1] , lowerCAmelCase__ , 3 , padding=1 )
A_ : List[str] = False
def _a (self , lowercase ):
A_ : Optional[Any] = x
A_ : Dict = self.conv_in(lowerCAmelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase ):
def custom_forward(*lowercase ):
return module(*lowerCAmelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
# middle
A_ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
else:
for down_block in self.down_blocks:
A_ : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ )
# middle
A_ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ : Optional[int] = down_block(lowerCAmelCase__ )
# middle
A_ : int = self.mid_block(lowerCAmelCase__ )
# post-process
A_ : Tuple = self.conv_norm_out(lowerCAmelCase__ )
A_ : int = self.conv_act(lowerCAmelCase__ )
A_ : str = self.conv_out(lowerCAmelCase__ )
return sample
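# Added shape walkthrough (assumes the encoder defaults above: in_channels=3,
# a single "DownEncoderBlock2D" stage with 64 channels, double_z=True): a
# (1, 3, 64, 64) input becomes (1, 64, 64, 64) after conv_in, keeps that shape
# through the final (non-downsampling) stage and the mid block, and conv_out
# doubles the channels to (1, 6, 64, 64) so the diagonal Gaussian further
# below can split it into mean and log-variance.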
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase=3 , lowercase=3 , lowercase=("UpDecoderBlock2D",) , lowercase=(64,) , lowercase=2 , lowercase=32 , lowercase="silu" , lowercase="group" , ):
super().__init__()
A_ : List[str] = layers_per_block
A_ : Optional[int] = nn.Convad(
lowerCAmelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ : int = None
A_ : Union[str, Any] = nn.ModuleList([] )
A_ : Optional[int] = in_channels if norm_type == "spatial" else None
# mid
A_ : Any = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
# up
A_ : List[str] = list(reversed(lowerCAmelCase__ ) )
A_ : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase__ ):
A_ : str = output_channel
A_ : Tuple = reversed_block_out_channels[i]
A_ : Any = i == len(lowerCAmelCase__ ) - 1
A_ : str = get_up_block(
lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , resnet_groups=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , resnet_time_scale_shift=lowerCAmelCase__ , )
self.up_blocks.append(lowerCAmelCase__ )
A_ : List[str] = output_channel
# out
if norm_type == "spatial":
A_ : Optional[Any] = SpatialNorm(block_out_channels[0] , lowerCAmelCase__ )
else:
A_ : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase__ , eps=1E-6 )
A_ : int = nn.SiLU()
A_ : Union[str, Any] = nn.Convad(block_out_channels[0] , lowerCAmelCase__ , 3 , padding=1 )
A_ : int = False
def _a (self , lowercase , lowercase=None ):
A_ : Any = z
A_ : Union[str, Any] = self.conv_in(lowerCAmelCase__ )
A_ : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase ):
def custom_forward(*lowercase ):
return module(*lowerCAmelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ : Optional[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
A_ : List[Any] = sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
A_ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
else:
# middle
A_ : Optional[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , lowerCAmelCase__ )
A_ : int = sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
A_ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ )
else:
# middle
A_ : List[Any] = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ )
A_ : int = sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
A_ : Optional[Any] = up_block(lowerCAmelCase__ , lowerCAmelCase__ )
# post-process
if latent_embeds is None:
A_ : str = self.conv_norm_out(lowerCAmelCase__ )
else:
A_ : Optional[int] = self.conv_norm_out(lowerCAmelCase__ , lowerCAmelCase__ )
A_ : Any = self.conv_act(lowerCAmelCase__ )
A_ : Tuple = self.conv_out(lowerCAmelCase__ )
return sample
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase , lowercase , lowercase , lowercase=None , lowercase="random" , lowercase=False , lowercase=True ):
super().__init__()
A_ : Union[str, Any] = n_e
A_ : Optional[Any] = vq_embed_dim
A_ : int = beta
A_ : Any = legacy
A_ : str = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ : str = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ : List[Any] = self.used.shape[0]
A_ : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ : List[str] = self.re_embed
A_ : List[str] = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A_ : str = n_e
A_ : Union[str, Any] = sane_index_shape
def _a (self , lowercase ):
A_ : List[Any] = inds.shape
assert len(lowerCAmelCase__ ) > 1
A_ : Union[str, Any] = inds.reshape(ishape[0] , -1 )
A_ : Optional[Any] = self.used.to(lowerCAmelCase__ )
A_ : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
A_ : str = match.argmax(-1 )
A_ : int = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ : Optional[Any] = self.unknown_index
return new.reshape(lowerCAmelCase__ )
def _a (self , lowercase ):
A_ : str = inds.shape
assert len(lowerCAmelCase__ ) > 1
A_ : Tuple = inds.reshape(ishape[0] , -1 )
A_ : Dict = self.used.to(lowerCAmelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ : Optional[Any] = 0 # simply set to zero
A_ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase__ )
return back.reshape(lowerCAmelCase__ )
def _a (self , lowercase ):
# reshape z -> (batch, height, width, channel) and flatten
A_ : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ : Optional[int] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ : Tuple = torch.argmin(torch.cdist(lowerCAmelCase__ , self.embedding.weight ) , dim=1 )
A_ : Dict = self.embedding(lowerCAmelCase__ ).view(z.shape )
A_ : Dict = None
A_ : Tuple = None
# compute loss for embedding
if not self.legacy:
A_ : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ : Optional[int] = z + (z_q - z).detach()
# reshape back to match original input shape
A_ : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ : Optional[Any] = self.remap_to_used(lowerCAmelCase__ )
A_ : List[Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ : List[Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _a (self , lowercase , lowercase ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ : List[str] = indices.reshape(shape[0] , -1 ) # add batch axis
A_ : Optional[Any] = self.unmap_to_all(lowerCAmelCase__ )
A_ : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ : str = self.embedding(lowerCAmelCase__ )
if shape is not None:
A_ : List[str] = z_q.view(lowerCAmelCase__ )
# reshape back to match original input shape
A_ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
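# Added illustration (self-contained, not the class above): the core trick in
# the quantizer's forward pass is the straight-through estimator
# `z_q = z + (z_q - z).detach()`: the forward value is the quantized code,
# while gradients flow to z as if the op were the identity.
def _straight_through_example():
    z = torch.randn(4, 8, requires_grad=True)
    codebook = torch.randn(16, 8)
    idx = torch.argmin(torch.cdist(z, codebook), dim=1)
    z_q = z + (codebook[idx] - z).detach()  # forward: code, backward: identity
    z_q.sum().backward()
    assert torch.allclose(z.grad, torch.ones_like(z))
    return idx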
class _lowerCAmelCase ( a__ ):
def __init__(self , lowercase , lowercase=False ):
A_ : Any = parameters
A_ : Tuple = torch.chunk(lowerCAmelCase__ , 2 , dim=1 )
A_ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ : int = deterministic
A_ : Optional[Any] = torch.exp(0.5 * self.logvar )
A_ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
A_ : int = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _a (self , lowercase = None ):
# make sure sample is on the same device as the parameters and has same dtype
A_ : List[str] = randn_tensor(
self.mean.shape , generator=lowerCAmelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ : List[str] = self.mean + self.std * sample
return x
def _a (self , lowercase=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _a (self , lowercase , lowercase=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
A_ : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase__ )
def _a (self ):
return self.mean | 709 |
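# Added numeric check (scalar analogue of kl() above against N(0, 1), which
# reduces to 0.5 * (mu**2 + exp(logvar) - 1 - logvar) per dimension; the
# class itself sums this expression over dims [1, 2, 3]).
def _kl_example(mu=1.0, logvar=0.0):
    kl = 0.5 * (mu**2 + np.exp(logvar) - 1.0 - logvar)
    assert abs(kl - 0.5) < 1e-12  # holds for the default mu=1, logvar=0
    return kl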
'''simple docstring'''
lowerCamelCase :dict[tuple[int, int, int], int] = {}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
A_ : Tuple = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
A_ : int = _calculate(days - 1 , lowerCamelCase__ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
A_ : Union[str, Any] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
A_ : Optional[int] = _calculate(days - 1 , lowerCamelCase__ , 0 )
A_ : Optional[Any] = state_late + state_absent + state_ontime
A_ : Dict = prizestrings
return prizestrings
def a ( lowerCamelCase__ = 30 ):
'''simple docstring'''
return _calculate(lowerCamelCase__ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution()) | 686 | 0 |
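# Added checks (hedged): the values below were computed by hand from the same
# rules (one day allows "O", "L" or "A", so 3 strings; four days allow 43).
# The helper keeps its original name `_calculate` here, matching how
# solution() calls it above. The printed 30-day value should be 1918080160,
# Project Euler 191's published answer (stated for reference, not recomputed).
assert _calculate(1, 0, 0) == 3
assert _calculate(4, 0, 0) == 43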
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    lowerCamelCase , lowerCamelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
lowerCamelCase = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
lowerCamelCase = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCamelCase = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
    #                  stream_logs=True)
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = 'linear'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial'
__SCREAMING_SNAKE_CASE : Optional[int] = 'constant'
__SCREAMING_SNAKE_CASE : str = 'constant_with_warmup'
__SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant'
def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) )
return 1.0
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ):
'''simple docstring'''
A_ : Optional[Any] = {}
A_ : Optional[Any] = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A_, A_ : Union[str, Any] = rule_str.split(""":""" )
A_ : Union[str, Any] = int(lowerCamelCase__ )
A_ : List[Any] = float(lowerCamelCase__ )
A_ : Union[str, Any] = value
A_ : Optional[int] = float(rule_list[-1] )
def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ):
def rule_func(lowerCamelCase__ ) -> float:
A_ : str = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A_ : str = create_rules_function(lowerCamelCase__ , lowerCamelCase__ )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
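# Added worked values (a standalone mirror of the cosine lambda above, with
# num_warmup_steps=10, num_training_steps=110 and the default num_cycles=0.5):
# the multiplier reaches 1.0 right after warmup, 0.5 halfway through the
# decay, and 0.0 at the final step.
def _cosine_multiplier(step, warmup=10, total=110, cycles=0.5):
    if step < warmup:
        return step / max(1, warmup)
    progress = (step - warmup) / max(1, total - warmup)
    return max(0.0, 0.5 * (1.0 + math.cos(math.pi * cycles * 2.0 * progress)))

assert abs(_cosine_multiplier(10) - 1.0) < 1e-12
assert abs(_cosine_multiplier(60) - 0.5) < 1e-12
assert abs(_cosine_multiplier(110) - 0.0) < 1e-12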
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ):
'''simple docstring'''
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ):
'''simple docstring'''
A_ : Optional[Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
    raise ValueError(f'lr_end ({lr_end}) must be smaller than the initial lr ({lr_init})' )
def lr_lambda(lowerCamelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A_ : str = lr_init - lr_end
A_ : Tuple = num_training_steps - num_warmup_steps
A_ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
A_ : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase :List[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ):
'''simple docstring'''
A_ : Optional[Any] = SchedulerType(lowerCamelCase__ )
A_ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , )
return schedule_func(
lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) | 686 | 0 |
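# Added usage sketch (hedged): a minimal end-to-end use of the linear
# warmup-then-decay schedule defined above, rebuilt inline so the example is
# self-contained (the dispatch table in this dump references the original,
# un-obfuscated function names). The model and optimizer are illustrative.
def _linear_warmup_example(warmup=10, total=100):
    import torch

    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

    def lr_lambda(step):
        if step < warmup:
            return step / max(1, warmup)
        return max(0.0, (total - step) / max(1, total - warmup))

    scheduler = LambdaLR(optimizer, lr_lambda)
    for _ in range(20):
        optimizer.step()
        scheduler.step()
    return scheduler.get_last_lr()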
'''simple docstring'''
import qiskit
def a ( lowerCamelCase__ = 2 ):
'''simple docstring'''
    A_ : Tuple = lowerCamelCase__
# Using Aer's simulator
A_ : List[Any] = qiskit.Aer.get_backend("""aer_simulator""" )
# Creating a Quantum Circuit acting on the q register
    A_ : Optional[Any] = qiskit.QuantumCircuit(lowerCamelCase__ , lowerCamelCase__ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
    for i in range(1 , lowerCamelCase__ ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
# Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(lowerCamelCase__ ) ) , list(range(lowerCamelCase__ ) ) )
    # Now measuring any one qubit would cause the others to collapse out of
    # their superposition into the same state as the measured one.
# Executing the circuit on the simulator
    A_ : List[Any] = qiskit.execute(circuit , simulator , shots=10_00 )
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F"Total count for various states are: {quantum_entanglement(3)}") | 711 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''')
lowerCamelCase :int = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowerCamelCase :List[str] = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowerCamelCase :Union[str, Any] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowerCamelCase :Dict = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowerCamelCase :int = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowerCamelCase :str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCamelCase :List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCamelCase :Tuple = []
lowerCamelCase :Dict = []
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Dict = value
elif weight_type == "bias":
A_ : Dict = value
elif weight_type == "running_mean":
A_ : Optional[Any] = value
elif weight_type == "running_var":
A_ : int = value
elif weight_type == "num_batches_tracked":
A_ : Optional[Any] = value
elif weight_type == "weight_ih_l0":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l0":
A_ : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l0":
A_ : Tuple = value
elif weight_type == "weight_ih_l1":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
A_ : Dict = value
elif weight_type == "bias_ih_l1":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l1":
A_ : Tuple = value
else:
A_ : Any = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A_, A_ : List[str] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
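# Added mini-demo (hedged; the helper is bound to an obfuscated name in this
# dump but is called as `should_ignore` above): "foo.*" acts as a prefix
# match, "a.*.b" as an infix pattern, and plain keys as substring matches.
assert should_ignore("quantizer.vq.layers.0.bias", ["quantizer.vq.*"])
assert should_ignore("encoder.model.1.block.3.conv", ["encoder.*.conv"])
assert not should_ignore("decoder.model.0.conv", ["encoder.*"])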
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = []
if model_name == "encodec_24khz" or "encodec_32khz":
A_ : List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A_ : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__ , lowerCamelCase__ ):
logger.info(f'{name} was ignored' )
continue
A_ : str = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A_, A_ : List[Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
A_ : Union[str, Any] = True
if "*" in mapped_key:
A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : str = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Dict = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : Any = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Optional[int] = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : List[Any] = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : Union[str, Any] = """running_mean"""
elif "running_var" in name:
A_ : Optional[int] = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
'''simple docstring'''
if config_path is not None:
A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Dict = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
A_ : Tuple = 64
A_ : Tuple = 3_20_00
A_ : List[Any] = 20_48
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[int] = False
elif model_name == "encodec_48khz":
A_ : Dict = [8, 5, 4, 2]
A_ : Tuple = [3.0, 6.0, 12.0, 24.0]
A_ : List[Any] = 4_80_00
A_ : Dict = 2
A_ : Dict = False
A_ : Dict = """time_group_norm"""
A_ : Optional[Any] = True
A_ : str = 1.0
A_ : Any = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
A_ : Dict = EncodecModel(lowerCamelCase__ )
A_ : Any = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase__ )
A_ : int = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 686 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :str = logging.get_logger(__name__)
lowerCamelCase :Optional[int] = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE : int = 'swinv2'
__SCREAMING_SNAKE_CASE : List[str] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=224 , lowercase=4 , lowercase=3 , lowercase=96 , lowercase=[2, 2, 6, 2] , lowercase=[3, 6, 12, 24] , lowercase=7 , lowercase=4.0 , lowercase=True , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase="gelu" , lowercase=False , lowercase=0.02 , lowercase=1E-5 , lowercase=32 , **lowercase , ):
super().__init__(**UpperCamelCase__ )
A_ : int = image_size
A_ : Union[str, Any] = patch_size
A_ : str = num_channels
A_ : Union[str, Any] = embed_dim
A_ : Optional[Any] = depths
A_ : Any = len(UpperCamelCase__ )
A_ : str = num_heads
A_ : Tuple = window_size
A_ : Dict = mlp_ratio
A_ : Dict = qkv_bias
A_ : str = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Optional[Any] = drop_path_rate
A_ : Optional[Any] = hidden_act
A_ : Union[str, Any] = use_absolute_embeddings
A_ : Optional[int] = layer_norm_eps
A_ : str = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(UpperCamelCase__ ) - 1) )
A_ : Optional[Any] = (0, 0, 0, 0) | 712 |
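# Added worked example (the class above is Swinv2Config in the original
# file): with embed_dim=96 and depths=[2, 2, 6, 2], the derived hidden_size
# is embed_dim * 2 ** (num_stages - 1) = 96 * 2 ** 3 = 768, the channel
# width after the last stage.
assert 96 * 2 ** (len([2, 2, 6, 2]) - 1) == 768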
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'beit'
def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = vocab_size
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = initializer_range
A_ : str = layer_norm_eps
A_ : Any = image_size
A_ : int = patch_size
A_ : List[str] = num_channels
A_ : Any = use_mask_token
A_ : Dict = use_absolute_position_embeddings
A_ : List[Any] = use_relative_position_bias
A_ : Tuple = use_shared_relative_position_bias
A_ : Optional[int] = layer_scale_init_value
A_ : Tuple = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : Optional[int] = use_auxiliary_head
A_ : Union[str, Any] = auxiliary_loss_weight
A_ : Tuple = auxiliary_channels
A_ : List[Any] = auxiliary_num_convs
A_ : Dict = auxiliary_concat_input
A_ : Optional[Any] = semantic_loss_ignore_index
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4 | 686 | 0 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowerCamelCase :Dict = logging.getLogger(__name__)
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , ):
'''simple docstring'''
A_ : str = bnb_quantization_config.load_in_abit
A_ : Dict = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A_ : int = []
# custom device map
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(device_map.keys() ) > 1:
A_ : int = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A_ : List[str] = get_keys_to_not_convert(__UpperCamelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__UpperCamelCase )
A_ : Optional[int] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A_ : Optional[int] = []
A_ : List[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__UpperCamelCase )
# compatibility with peft
A_ : List[str] = load_in_abit
A_ : Optional[int] = load_in_abit
A_ : Optional[int] = get_parameter_device(__UpperCamelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A_ : Dict = replace_with_bnb_layers(__UpperCamelCase , __UpperCamelCase , modules_to_not_convert=__UpperCamelCase )
# convert param to the right dtype
A_ : Union[str, Any] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A_ : List[str] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A_ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__UpperCamelCase ):
param.to(__UpperCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'The model device type is {model_device.type}. However, cuda is needed for quantization.'
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
A_ : List[Any] = replace_with_bnb_layers(
__UpperCamelCase , __UpperCamelCase , modules_to_not_convert=__UpperCamelCase )
A_ : List[str] = get_quantized_model_device_map(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , max_memory=__UpperCamelCase , no_split_module_classes=__UpperCamelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A_ : Union[str, Any] = True
A_ : List[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__UpperCamelCase , offload_state_dict=__UpperCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__UpperCamelCase , device_map=__UpperCamelCase , offload_dir=__UpperCamelCase )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None ):
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
A_ : int = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A_ : Optional[Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A_ : Optional[int] = {}
A_ : List[Any] = special_dtypes
A_ : List[str] = no_split_module_classes
A_ : Optional[int] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A_ : Optional[int] = get_balanced_memory(
__UpperCamelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=__UpperCamelCase , **__UpperCamelCase , )
A_ : Optional[int] = max_memory
A_ : str = infer_auto_device_map(__UpperCamelCase , **__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
# check if don't have any quantized module on the cpu
A_ : List[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A_ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
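# Added example (hedged): a custom device_map passed to the loader above is a
# plain dict from module prefixes to devices, e.g.
#   {"": 0}                                  # whole model on GPU 0
#   {"transformer": 0, "lm_head": "cpu"}     # offload the head to CPU
# Quantized modules may not be placed on "cpu"/"disk" when loading in 4-bit,
# which is exactly what the validation loop above enforces.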
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ):
'''simple docstring'''
if modules_to_not_convert is None:
A_ : Union[str, Any] = []
A_, A_ : List[Any] = _replace_with_bnb_layers(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
'''simple docstring'''
A_ : str = False
for name, module in model.named_children():
if current_key_name is None:
A_ : List[Any] = []
current_key_name.append(__UpperCamelCase )
if isinstance(__UpperCamelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A_ : int = """.""".join(__UpperCamelCase )
A_ : Dict = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A_ : Tuple = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A_ : Union[str, Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__UpperCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A_ : Any = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A_ : List[str] = module.weight.data
if module.bias is not None:
A_ : Optional[Any] = module.bias.data
bnb_module.requires_grad_(__UpperCamelCase )
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A_ : Optional[int] = True
if len(list(module.children() ) ) > 0:
A_, A_ : Any = _replace_with_bnb_layers(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A_ : Any = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def a ( lowerCamelCase__ ):
'''simple docstring'''
with init_empty_weights():
A_ : int = deepcopy(__UpperCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A_ : Dict = find_tied_parameters(__UpperCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A_ : List[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A_ : Union[str, Any] = sum(__UpperCamelCase , [] )
A_ : str = len(__UpperCamelCase ) > 0
# Check if it is a base model
A_ : Any = False
if hasattr(__UpperCamelCase , """base_model_prefix""" ):
A_ : Optional[int] = not hasattr(__UpperCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A_ : List[Any] = list(model.named_children() )
A_ : str = [list_modules[-1][0]]
# add last module together with tied weights
A_ : Union[str, Any] = set(__UpperCamelCase ) - set(__UpperCamelCase )
A_ : Union[str, Any] = list(set(__UpperCamelCase ) ) + list(__UpperCamelCase )
# remove ".weight" from the keys
A_ : int = [""".weight""", """.bias"""]
A_ : str = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A_ : str = name.replace(__UpperCamelCase , """""" )
filtered_module_names.append(__UpperCamelCase )
return filtered_module_names
def a ( lowerCamelCase__ ):
'''simple docstring'''
for m in model.modules():
if isinstance(__UpperCamelCase , bnb.nn.Linearabit ):
return True
return False
def a ( lowerCamelCase__ ):
'''simple docstring'''
return next(parameter.parameters() ).device
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(__UpperCamelCase , __UpperCamelCase , 0 , dtype=__UpperCamelCase , value=__UpperCamelCase )
A_ : Any = param_name
A_ : List[Any] = model
if "." in tensor_name:
A_ : Union[str, Any] = tensor_name.split(""".""" )
for split in splits[:-1]:
A_ : Any = getattr(__UpperCamelCase , __UpperCamelCase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
A_ : Optional[Any] = new_module
A_ : Union[str, Any] = splits[-1]
# offload weights
A_ : Any = False
offload_weight(module._parameters[tensor_name] , __UpperCamelCase , __UpperCamelCase , index=__UpperCamelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , __UpperCamelCase , index=__UpperCamelCase , )
else:
offload_weight(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , index=__UpperCamelCase )
offload_weight(__UpperCamelCase , param_name.replace("""weight""" , """SCB""" ) , __UpperCamelCase , index=__UpperCamelCase )
set_module_tensor_to_device(__UpperCamelCase , __UpperCamelCase , """meta""" , dtype=__UpperCamelCase , value=torch.empty(*param.size() ) ) | 713 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
lowerCamelCase :Optional[int] = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return torch.atan2(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2
A_ : List[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ )
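# --- Added sanity check (hedged): the helper above computes
# t = atan2(sigma, alpha) / (pi / 2), inverting the parametrization
# alpha = cos(t * pi / 2), sigma = sin(t * pi / 2). A quick round-trip check,
# inlining the formula rather than calling the renamed helper:
#
#   t = torch.linspace(0.0, 1.0, 5)
#   alpha, sigma = torch.cos(t * math.pi / 2), torch.sin(t * math.pi / 2)
#   assert torch.allclose(torch.atan2(sigma, alpha) / math.pi * 2, t, atol=1e-6)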
class _lowerCAmelCase ( __UpperCAmelCase ):
pass
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase ):
super().__init__()
A_ : int = DiffusionAttnUnet1D(lowercase , n_attn_layers=4 )
A_ : str = deepcopy(self.diffusion )
A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = MODELS_MAP[model_name]["""url"""]
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCamelCase :str = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCamelCase :int = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCamelCase :List[Any] = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCamelCase :Optional[Any] = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return name.replace(lowerCamelCase__ , lowerCamelCase__ )
elif name.startswith(lowerCamelCase__ ):
return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value]
raise ValueError(f'Attn error with {name}' )
def a ( lowerCamelCase__ , lowerCamelCase__=13 ):
'''simple docstring'''
A_ : Union[str, Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
A_ : Dict = 0
if string.startswith("""net.3.""" ):
depth += 1
A_ : int = string[6:]
elif string.startswith("""net.""" ):
A_ : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
A_ : Dict = string[7:]
if string.startswith("""main.""" ):
A_ : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
A_ : Optional[Any] = string[:2]
A_ : Optional[Any] = string[2:]
else:
A_ : List[Any] = string[0]
A_ : Dict = string[1:]
if depth == max_depth:
A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num]
A_ : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowerCamelCase__ ) < 7:
A_ : Any = DOWN_NUM_TO_LAYER[layer_num]
A_ : Union[str, Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCamelCase__ ) > 7:
A_ : List[str] = UP_NUM_TO_LAYER[layer_num]
A_ : List[str] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
A_ : str = DEPTH_0_TO_LAYER[layer_num]
A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
A_ : Optional[int] = string_left[1:]
if "resnets" in new_layer:
A_ : Tuple = convert_resconv_naming(lowerCamelCase__ )
elif "attentions" in new_layer:
A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ )
A_ : Dict = new_string_left
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers don't have trainable weights
continue
A_ : List[Any] = rename(lowerCamelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : int = v
return new_state_dict
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if len(lowerCamelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
A_ : Optional[Any] = v[:, :, 0]
else:
# bias
A_ : Union[str, Any] = v
else:
# qkv matrices
A_ : Optional[int] = v.shape[0]
A_ : str = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
A_ : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
A_ : int = download(lowerCamelCase__ )
A_ : Any = MODELS_MAP[model_name]["""sample_rate"""]
A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""]
A_ : Tuple = Object()
A_ : Union[str, Any] = sample_size
A_ : Tuple = sample_rate
A_ : int = 0
A_ : List[Any] = UNet1DModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ )
A_ : Optional[Any] = diffusers_model.state_dict()
A_ : Dict = DiffusionUncond(lowerCamelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] )
A_ : Any = orig_model.diffusion_ema.eval()
A_ : Any = orig_model.state_dict()
A_ : List[str] = rename_orig_weights(lowerCamelCase__ )
A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
A_ : str = value.squeeze()
A_ : Union[str, Any] = value
diffusers_model.load_state_dict(lowerCamelCase__ )
A_ : Optional[Any] = 1_00
A_ : Union[str, Any] = 33
A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ )
A_ : List[str] = torch.manual_seed(lowerCamelCase__ )
A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ )
A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1]
A_ : List[Any] = get_crash_schedule(lowerCamelCase__ )
A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A_ : str = torch.manual_seed(33 )
A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios
A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} )
A_ : str = generated.clamp(-1 , 1 )
A_ : List[Any] = (generated - audio).abs().sum()
A_ : int = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase__ )
print("""Diff max""" , lowerCamelCase__ )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase :List[str] = parser.parse_args()
main(args) | 686 | 0 |
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = set(lowerCamelCase__ ), [start]
while stack:
A_ : int = stack.pop()
explored.add(lowerCamelCase__ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(lowerCamelCase__ )
return explored
lowerCamelCase :Union[str, Any] = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A''')) | 714 |
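# --- Added contrast (hedged, illustrative): the comments above list the
# differences from BFS; the BFS counterpart uses a FIFO queue and marks nodes
# when they are enqueued rather than when they are popped:
from collections import deque

def breadth_first_search(graph: dict, start: str) -> set:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()  # FIFO: expand the oldest frontier node first
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored

# On the graph above, breadth_first_search(G, "A") reaches the same node set as
# the DFS, just in level order.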
'''simple docstring'''
from math import factorial
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
A_ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
A_ : Union[str, Any] = float(factorial(lowerCamelCase__ ) )
coefficient /= factorial(lowerCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 0 |
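# --- Added worked example (hedged): for 2 successes in 4 trials at p = 0.75,
# C(4, 2) = 4! / (2! * 2!) = 6 and 0.75**2 * 0.25**2 = 0.03515625, so the call
# above should print 6 * 0.03515625 = 0.2109375.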
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = []
A_ : str = []
A_ : str = 0
A_ : Dict = sum(SCREAMING_SNAKE_CASE_ )
create_state_space_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return result
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
'''simple docstring'''
if sum(SCREAMING_SNAKE_CASE_ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE_ )) < max_sum:
return
if sum(SCREAMING_SNAKE_CASE_ ) == max_sum:
result.append(SCREAMING_SNAKE_CASE_ )
return
for index in range(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ):
create_state_space_tree(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE_ , remaining_nums_sum - nums[index] , )
lowerCamelCase :Optional[int] = [3, 3_4, 4, 1_2, 5, 2]
lowerCamelCase :Union[str, Any] = 9
lowerCamelCase :Any = generate_sum_of_subsets_soln(nums, max_sum)
print(*result) | 715 |
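# --- Added note (hedged): for nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the
# backtracking above finds exactly [3, 4, 2] and [4, 5] (3 + 4 + 2 = 9 and
# 4 + 5 = 9), so those two subsets are what gets printed.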
'''simple docstring'''
import re
def a ( lowerCamelCase__ ):
'''simple docstring'''
if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 0 |
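# --- Added example (hedged): str.translate with the A<->T / C<->G table maps
# each base independently, e.g. the complement of "ATCG" is "TAGC" and the
# complement of "GGTCA" is "CCAGT"; any other character trips the regex length
# check above and raises ValueError("Invalid Strand").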
from random import randint
from tempfile import TemporaryFile
import numpy as np
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = 0
if start < end:
A_ : List[str] = randint(_lowerCAmelCase , _lowerCAmelCase )
A_ : Union[str, Any] = a[end]
A_ : Any = a[pivot]
A_ : Union[str, Any] = temp
A_ : Union[str, Any] = _in_place_partition(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
count += _in_place_quick_sort(_lowerCAmelCase , _lowerCAmelCase , p - 1 )
count += _in_place_quick_sort(_lowerCAmelCase , p + 1 , _lowerCAmelCase )
return count
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = 0
A_ : int = randint(_lowerCAmelCase , _lowerCAmelCase )
A_ : Tuple = a[end]
A_ : Union[str, Any] = a[pivot]
A_ : Optional[int] = temp
A_ : Any = start - 1
for index in range(_lowerCAmelCase , _lowerCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
A_ : Any = new_pivot_index + 1
A_ : Optional[Any] = a[new_pivot_index]
A_ : Union[str, Any] = a[index]
A_ : List[str] = temp
A_ : List[str] = a[new_pivot_index + 1]
A_ : Dict = a[end]
A_ : Union[str, Any] = temp
return new_pivot_index + 1, count
lowerCamelCase :Union[str, Any] = TemporaryFile()
lowerCamelCase :Optional[int] = 1_0_0 # 100 elements are to be sorted
lowerCamelCase , lowerCamelCase :List[str] = 0, 1 # mean and standard deviation
lowerCamelCase :Dict = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
lowerCamelCase :List[Any] = np.load(outfile)
lowerCamelCase :Optional[Any] = len(M) - 1
lowerCamelCase :Optional[int] = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
''' is :'''
)
print(z) | 716 |
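# --- Added note (hedged): with uniformly random pivots, quicksort performs
# about 2 * n * ln(n) comparisons in expectation; for the n = 100 array above
# that is roughly 2 * 100 * 4.6 ≈ 920, so the printed count should usually land
# in the high hundreds.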
# ... (elided: verbatim duplicate of the offline-mode pytest snippet that opens this document) | 686 | 0 |
'''simple docstring'''
import numpy as np
from PIL import Image
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = np.array(lowerCamelCase__ )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
A_ : Tuple = 0
A_ : Optional[Any] = 0
A_ : Optional[int] = 0
A_ : Optional[Any] = 0
# compute the shape of the output matrix
A_ : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
A_ : Union[str, Any] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
A_ : Tuple = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A_ : Dict = 0
A_ : Union[str, Any] = 0
return updated_arr
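# --- Added worked example (hedged): max-pooling a 4x4 matrix with size=2 and
# stride=2 keeps the maximum of each non-overlapping 2x2 window; the output is
# (4 - 2) // 2 + 1 = 2 cells per side:
#
#   [[ 1,  2,  3,  4],
#    [ 5,  6,  7,  8],        [[ 6,  8],
#    [ 9, 10, 11, 12],   -->   [14, 16]]
#    [13, 14, 15, 16]]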
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : int = np.array(lowerCamelCase__ )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
A_ : List[str] = 0
A_ : Optional[int] = 0
A_ : List[str] = 0
A_ : Optional[int] = 0
# compute the shape of the output matrix
A_ : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
A_ : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
A_ : Any = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A_ : str = 0
A_ : Union[str, Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
lowerCamelCase :Optional[int] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 717 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCamelCase :Any = re.compile(R'''\s+''')
def a ( lowerCamelCase__ ):
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def a ( lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""]
A_ : Optional[int] = example["""content"""].splitlines()
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ):
'''simple docstring'''
A_ : Any = ["""unit tests""", """test file""", """configuration file"""]
A_ : List[str] = example["""content"""].splitlines()
A_ : str = 0
A_ : Union[str, Any] = 0
# first test
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : List[Any] = example["""content"""].count("""\n""" )
A_ : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = ["""def """, """class """, """for """, """while """]
A_ : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a ( lowerCamelCase__ , lowerCamelCase__=4 ):
'''simple docstring'''
A_ : Tuple = example["""content"""].splitlines()
A_ : int = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""]
A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ )
return {"ratio": ratio}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = {}
results.update(get_hash(lowerCamelCase__ ) )
results.update(line_stats(lowerCamelCase__ ) )
results.update(alpha_stats(lowerCamelCase__ ) )
results.update(char_token_ratio(lowerCamelCase__ ) )
results.update(is_autogenerated(lowerCamelCase__ ) )
results.update(is_config_or_test(lowerCamelCase__ ) )
results.update(has_no_keywords(lowerCamelCase__ ) )
results.update(has_few_assignments(lowerCamelCase__ ) )
return results
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a ( lowerCamelCase__ ):
'''simple docstring'''
with open(lowerCamelCase__ , """rb""" ) as f_in:
with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
os.unlink(lowerCamelCase__ )
# Settings
lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments)
lowerCamelCase :Tuple = parser.parse_args()
if args.num_workers is None:
lowerCamelCase :Tuple = multiprocessing.cpu_count()
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCamelCase :List[Any] = time.time()
lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
lowerCamelCase :int = time.time()
lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
lowerCamelCase :int = set(ds.unique('''hash'''))
lowerCamelCase :List[str] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
lowerCamelCase :Dict = time.time()
lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCamelCase :List[str] = time.time()
lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
lowerCamelCase :int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
lowerCamelCase :Tuple = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
lowerCamelCase :Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json")
lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 0 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
# old versions of hfh don't url-encode the file path
A_ : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" , revision=__lowerCAmelCase ) | 718 |
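# --- Added example (hedged): the compatibility branch matters for paths that
# need percent-encoding, e.g.
#
#   >>> from urllib.parse import quote
#   >>> quote("data/train file.jsonl")
#   'data/train%20file.jsonl'
#
# newer huggingface_hub releases perform this encoding themselves, so the quote
# step is only needed for releases older than 0.11.0.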
'''simple docstring'''
import pytest
lowerCamelCase :Optional[Any] = '''__dummy_dataset1__'''
lowerCamelCase :List[Any] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = dataset_loading_script_name
A_ : int = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase__ )
A_ : Tuple = script_dir / f'{script_name}.py'
with open(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ )
return str(lowerCamelCase__ ) | 686 | 0 |
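# --- Added usage sketch (hedged): a test can consume the fixture above by
# loading the generated script path; the fixture name below is the usual
# convention for this fixture and is an assumption here:
#
#   from datasets import load_dataset
#
#   def test_dummy_dataset(dataset_loading_script_dir):
#       ds = load_dataset(dataset_loading_script_dir, split="train")
#       assert "tokens" in ds.column_names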
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
A_ : int = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler("""sample_euler""" )
A_ : Union[str, Any] = """A painting of a squirrel eating a burger"""
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : Optional[Any] = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
A_ : Tuple = output.images
A_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ : Union[str, Any] = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a (self ):
A_ : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
A_ : str = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler("""sample_euler""" )
A_ : int = """A painting of a squirrel eating a burger"""
A_ : Dict = torch.manual_seed(0 )
A_ : Dict = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
A_ : Optional[int] = output.images
A_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ : Tuple = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _a (self ):
A_ : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
A_ : str = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
A_ : Dict = """A painting of a squirrel eating a burger"""
A_ : Any = torch.manual_seed(0 )
A_ : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=lowerCAmelCase_ , )
A_ : str = output.images
A_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ : Any = np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 719 |
# ... (elided: verbatim duplicate of the iris k-NN classifier snippet that appears near the top of this document) | 686 | 0 |
'''simple docstring'''
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if not (isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
A_ : Union[str, Any] = len(UpperCamelCase__ )
A_ : int = len(UpperCamelCase__ )
A_ : Union[str, Any] = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
A_ : int = 0
A_ : int = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
A_ : Union[str, Any] = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
A_ : int = i
A_ : Dict = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod() | 720 |
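# --- Added worked example (hedged): for inputs "abcdxyz" and "xyzabcd" the DP
# cell dp[i][j] holds the length of the longest common suffix of the two
# prefixes; the maximum value 4 is reached on the final "d" of "abcd", so the
# function returns "abcd".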
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Tuple = [sequences]
A_ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : int = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A_ : str = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 0 |
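# --- Added usage sketch (hedged): this pipeline is normally reached through the
# `pipeline` factory rather than instantiated directly; the checkpoint name is
# just a common example:
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   out = classifier(
#       "one day I will see the world",
#       candidate_labels=["travel", "cooking", "dancing"],
#       hypothesis_template="This example is {}.",
#   )
#   # out["labels"] is sorted to match out["scores"], highest score first.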
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCamelCase :Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase :Union[str, Any] = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = 42
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase , lowercase , ):
super().__init__()
self.register_modules(
prior=UpperCAmelCase__ , image_encoder=UpperCAmelCase__ , image_processor=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , renderer=UpperCAmelCase__ , )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
if latents is None:
A_ : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
A_ : Optional[int] = latents.to(UpperCAmelCase__ )
A_ : List[str] = latents * scheduler.init_noise_sigma
return latents
def _a (self , lowercase=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A_ : List[str] = torch.device(F'cuda:{gpu_id}' )
A_ : str = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )
@property
def _a (self ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCAmelCase__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _a (self , lowercase , lowercase , lowercase , lowercase , ):
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(image[0] , torch.Tensor ):
A_ : List[str] = torch.cat(UpperCAmelCase__ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCAmelCase__ , axis=0 )
if not isinstance(UpperCAmelCase__ , torch.Tensor ):
A_ : Optional[int] = self.image_processor(UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
A_ : Dict = image.to(dtype=self.image_encoder.dtype , device=UpperCAmelCase__ )
A_ : Union[str, Any] = self.image_encoder(UpperCAmelCase__ )["""last_hidden_state"""]
A_ : str = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
A_ : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
if do_classifier_free_guidance:
A_ : str = torch.zeros_like(UpperCAmelCase__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ : List[Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCAmelCase__ )
def __call__(self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ):
if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
A_ : Any = 1
elif isinstance(UpperCAmelCase__ , torch.Tensor ):
A_ : Optional[Any] = image.shape[0]
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
A_ : Optional[Any] = len(UpperCAmelCase__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCAmelCase__ )}' )
A_ : int = self._execution_device
A_ : Optional[int] = batch_size * num_images_per_prompt
A_ : Any = guidance_scale > 1.0
A_ : Optional[int] = self._encode_image(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# prior
self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
A_ : int = self.scheduler.timesteps
A_ : Dict = self.prior.config.num_embeddings
A_ : Optional[Any] = self.prior.config.embedding_dim
A_ : int = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
A_ : List[str] = latents.reshape(latents.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
A_ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : int = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
A_ : str = self.prior(
UpperCAmelCase__ , timestep=UpperCAmelCase__ , proj_embedding=UpperCAmelCase__ , ).predicted_image_embedding
# remove the variance
A_, A_ : int = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
A_, A_ : Any = noise_pred.chunk(2 )
A_ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
A_ : str = self.scheduler.step(
UpperCAmelCase__ , timestep=UpperCAmelCase__ , sample=UpperCAmelCase__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCAmelCase__ )
A_ : Optional[Any] = []
for i, latent in enumerate(UpperCAmelCase__ ):
A_ : str = self.renderer.decode(
latent[None, :] , UpperCAmelCase__ , size=UpperCAmelCase__ , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCAmelCase__ )
A_ : str = torch.stack(UpperCAmelCase__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
A_ : Tuple = images.cpu().numpy()
if output_type == "pil":
A_ : Union[str, Any] = [self.numpy_to_pil(UpperCAmelCase__ ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCAmelCase__ ) | 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'yolos'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = image_size
A_ : str = patch_size
A_ : int = num_channels
A_ : Optional[int] = qkv_bias
A_ : List[Any] = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : int = auxiliary_loss
# Hungarian matcher
A_ : int = class_cost
A_ : List[Any] = bbox_cost
A_ : Optional[int] = giou_cost
# Loss coefficients
A_ : Any = bbox_loss_coefficient
A_ : List[Any] = giou_loss_coefficient
A_ : str = eos_coefficient
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
return 12 | 686 | 0 |
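# --- Added usage sketch (hedged): the config class above can be instantiated on
# its own or used to build a randomly initialized model:
#
#   from transformers import YolosConfig, YolosForObjectDetection
#
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#   model = YolosForObjectDetection(config)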
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCamelCase :str = getLogger(__name__)
lowerCamelCase :Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 8 , lowerCamelCase__ = DEFAULT_DEVICE , lowerCamelCase__=False , lowerCamelCase__="summarization" , lowerCamelCase__=None , **lowerCamelCase__ , ):
'''simple docstring'''
A_ : Optional[Any] = Path(lowerCamelCase__ ).open("""w""" , encoding="""utf-8""" )
A_ : str = str(lowerCamelCase__ )
A_ : str = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
if fpaa:
A_ : List[str] = model.half()
A_ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase__ )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
A_ : List[str] = time.time()
# update config with task specific params
use_task_specific_params(lowerCamelCase__ , lowerCamelCase__ )
if prefix is None:
A_ : List[Any] = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
for examples_chunk in tqdm(list(chunks(lowerCamelCase__ , lowerCamelCase__ ) ) ):
A_ : Dict = [prefix + text for text in examples_chunk]
A_ : Any = tokenizer(lowerCamelCase__ , return_tensors="""pt""" , truncation=lowerCamelCase__ , padding="""longest""" ).to(lowerCamelCase__ )
A_ : Optional[Any] = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **lowerCamelCase__ , )
A_ : Optional[int] = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
A_ : Tuple = int(time.time() - start_time ) # seconds
A_ : Any = len(lowerCamelCase__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def a ( ):
'''simple docstring'''
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def a ( lowerCamelCase__=True ):
'''simple docstring'''
A_ : Dict = argparse.ArgumentParser()
parser.add_argument("""model_name""" , type=lowerCamelCase__ , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" , type=lowerCamelCase__ , help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" , type=lowerCamelCase__ , help="""where to save summaries""" )
parser.add_argument("""--reference_path""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" , type=lowerCamelCase__ , required=lowerCamelCase__ , default="""metrics.json""" , help="""where to save metrics""" )
parser.add_argument("""--device""" , type=lowerCamelCase__ , required=lowerCamelCase__ , default=lowerCamelCase__ , help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" , type=lowerCamelCase__ , required=lowerCamelCase__ , default=lowerCamelCase__ , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" , type=lowerCamelCase__ , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=lowerCamelCase__ , default=8 , required=lowerCamelCase__ , help="""batch size""" )
parser.add_argument(
"""--n_obs""" , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" , nargs="""?""" , type=lowerCamelCase__ , const=datetime_now() , help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
A_, A_ : Tuple = parser.parse_known_args()
A_ : List[Any] = parse_numeric_n_bool_cl_kwargs(lowerCamelCase__ )
if parsed_args and verbose:
print(f'parsed the following generate kwargs: {parsed_args}' )
A_ : int = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
A_ : Optional[int] = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=lowerCamelCase__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can\'t mix --fp16 and --device cpu""" )
A_ : Optional[Any] = generate_summaries_or_translations(
lowerCamelCase__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **lowerCamelCase__ , )
if args.reference_path is None:
return {}
# Compute scores
A_ : Optional[Any] = calculate_bleu if """translation""" in args.task else calculate_rouge
A_ : Union[str, Any] = [x.rstrip() for x in open(args.save_path ).readlines()]
A_ : int = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(lowerCamelCase__ )]
A_ : int = score_fn(lowerCamelCase__ , lowerCamelCase__ )
scores.update(lowerCamelCase__ )
if args.dump_args:
scores.update(lowerCamelCase__ )
if args.info:
A_ : Tuple = args.info
if verbose:
print(lowerCamelCase__ )
if args.score_path is not None:
json.dump(lowerCamelCase__ , open(args.score_path , """w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
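    # An illustrative summarization invocation (the data paths below are hypothetical
    # placeholders, not files shipped with this script):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $SAVE_DIR/test_summaries.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $SAVE_DIR/test_rouge.json --task summarization --bs 8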
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute(self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
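# A minimal worked example of the formula documented above (not part of the metric):
# reference = "this is the reference", prediction = "this is an reference" gives
# S=1 ("the" -> "an"), D=0, I=0, C=3, N=4, so WER = (1 + 0 + 0) / 4 = 0.25.
if __name__ == "__main__":
    example_wer = compute_measures("""this is the reference""" , """this is an reference""" )["wer"]
    print(example_wer )  # 0.25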
'''simple docstring'''
import requests
def send_slack_message(message_body , slack_url ):
    '''simple docstring'''
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url , json={"""text""": message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            """Request to slack returned an error """
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
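# The webhook endpoint expects a JSON body of the form {"text": "..."}; for a quick
# smoke test, any incoming-webhook URL works (the one below is a made-up placeholder):
#     send_slack_message("deploy finished", "https://hooks.slack.com/services/T000/B000/XXXXXXXX")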
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'negative_prompt',
        'height',
        'width',
        'negative_prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """An astronaut riding an elephant""",
            """source_prompt""": """An astronaut riding a horse""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """eta""": 0.1,
            """strength""": 0.8,
            """guidance_scale""": 3,
            """source_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_cycle(self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_cycle_fp16(self ):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module , """half""" ):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    @skip_mps
    def test_save_load_local(self ):
        return super().test_save_load_local()
    @unittest.skip("""non-deterministic pipeline""" )
    def test_inference_batch_single_identical(self ):
        return super().test_inference_batch_single_identical()
    @skip_mps
    def test_dict_tuple_outputs_equivalent(self ):
        return super().test_dict_tuple_outputs_equivalent()
    @skip_mps
    def test_save_load_optional_components(self ):
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass(self ):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
        init_image = init_image.resize((512, 512) )
        model_id = """CompVis/stable-diffusion-v1-4"""
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision="""fp16""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        source_prompt = """A black colored car"""
        prompt = """A blue colored car"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="""np""" , )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1
    def test_cycle_diffusion_pipeline(self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
        init_image = init_image.resize((512, 512) )
        model_id = """CompVis/stable-diffusion-v1-4"""
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        source_prompt = """A black colored car"""
        prompt = """A blue colored car"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="""np""" , )
        image = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring'''
def cocktail_shaker_sort(unsorted ):
    '''simple docstring'''
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(F"{cocktail_shaker_sort(unsorted) = }")
'''simple docstring'''
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = DownBlock2D  # noqa F405
    block_type = 'down'
    def test_output(self ):
        expected_slice = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
        super().test_output(expected_slice )
class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = 'down'
    def test_output(self ):
        expected_slice = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
        super().test_output(expected_slice )
class AttnDownBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = 'down'
    def test_output(self ):
        expected_slice = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
        super().test_output(expected_slice )
class CrossAttnDownBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = 'down'
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    def test_output(self ):
        expected_slice = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
        super().test_output(expected_slice )
class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_encoder_hidden_states=True )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
    def test_output(self ):
        expected_slice = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
        super().test_output(expected_slice )
class SkipDownBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_skip_sample=True )
    def test_output(self ):
        expected_slice = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
        super().test_output(expected_slice )
class AttnSkipDownBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_skip_sample=True )
    def test_output(self ):
        expected_slice = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
        super().test_output(expected_slice )
class DownEncoderBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_temb=False )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self ):
        expected_slice = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
        super().test_output(expected_slice )
class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_temb=False )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self ):
        expected_slice = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
        super().test_output(expected_slice )
class UNetMidBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = 'mid'
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """in_channels""": 32,
            """temb_channels""": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self ):
        expected_slice = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
        super().test_output(expected_slice )
class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = 'mid'
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    def test_output(self ):
        expected_slice = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
        super().test_output(expected_slice )
class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = 'mid'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_encoder_hidden_states=True )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    def test_output(self ):
        expected_slice = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
        super().test_output(expected_slice )
class UpBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = UpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    def test_output(self ):
        expected_slice = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
        super().test_output(expected_slice )
class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    def test_output(self ):
        expected_slice = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
        super().test_output(expected_slice )
class CrossAttnUpBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    def test_output(self ):
        expected_slice = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
        super().test_output(expected_slice )
class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True , include_encoder_hidden_states=True )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    def test_output(self ):
        expected_slice = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
        super().test_output(expected_slice )
class AttnUpBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
    def test_output(self ):
        expected_slice = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
        super().test_output(expected_slice )
class SkipUpBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    def test_output(self ):
        expected_slice = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
        super().test_output(expected_slice )
class AttnSkipUpBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    def test_output(self ):
        expected_slice = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
        super().test_output(expected_slice )
class UpDecoderBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_temb=False )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {"""in_channels""": 32, """out_channels""": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self ):
        expected_slice = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
        super().test_output(expected_slice )
class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin , unittest.TestCase ):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self ):
        return super().get_dummy_input(include_temb=False )
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {"""in_channels""": 32, """out_channels""": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self ):
        expected_slice = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(expected_slice )
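# For orientation, a minimal standalone sketch of what UNetBlockTesterMixin does for
# each class above (assumes diffusers is installed; shapes follow the tests' config):
#     import torch
#     from diffusers.models.unet_2d_blocks import DownBlock2D
#     block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128, add_downsample=False)
#     hidden_states, _ = block(torch.randn(4, 32, 32, 32), torch.randn(4, 128))
#     assert hidden_states.shape == (4, 32, 32, 32)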
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=1_6,
),
'generator': 2,
},
}
class DiffieHellman:
    def __init__(self , group = 14 ):
        if group not in primes:
            raise ValueError("""Unsupported Group""" )
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key(self ):
        return hex(self.__private_key )[2:]
    def generate_public_key(self ):
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key(self , key ):
        # a key is valid iff it lies in [2, p - 2] and key^((p - 1) / 2) == 1 (mod p)
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key(self , other_key_str ):
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("""Invalid public key""" )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str , prime ):
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str , remote_public_key_str , group = 14 ):
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("""Invalid public key""" )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod() | 703 |
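# Example key exchange between two parties (a quick sanity check: both sides must
# derive the same SHA-256-hashed shared key):
#     alice = DiffieHellman(group=14)
#     bob = DiffieHellman(group=14)
#     alice_key = alice.generate_shared_key(bob.generate_public_key())
#     bob_key = bob.generate_shared_key(alice.generate_public_key())
#     assert alice_key == bob_key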
'''simple docstring'''
from __future__ import annotations
def all_construct(target , word_bank = None ):
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size = len(target ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
) | 686 | 0 |
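    # Worked example for the dynamic-programming table above: with target "ab" and
    # word bank ["a", "b", "ab"], table[2] ends up holding both decompositions:
    #     all_construct("ab", ["a", "b", "ab"])  ->  [['ab'], ['a', 'b']]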
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self ):
        text_classifier = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
        outputs = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
        outputs = text_classifier("""This is great !""" , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}] )
        outputs = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}],
                [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}],
            ] , )
        outputs = text_classifier("""This is great !""" , top_k=1 )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
        # Legacy behavior
        outputs = text_classifier("""This is great !""" , return_all_scores=False )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
        outputs = text_classifier("""This is great !""" , return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ) , [[{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}]] )
        outputs = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}],
                [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}],
            ] , )
        outputs = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=False )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {"""label""": """LABEL_0""", """score""": 0.5_04},
                {"""label""": """LABEL_0""", """score""": 0.5_04},
            ] , )
    @require_torch
    def test_accepts_torch_device(self ):
        import torch
        text_classifier = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
        outputs = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
    @require_tf
    def test_small_model_tf(self ):
        text_classifier = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
        outputs = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
    @slow
    @require_torch
    def test_pt_bert(self ):
        text_classifier = pipeline("""text-classification""" )
        outputs = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        outputs = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        outputs = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 0.9_88}] )
    @slow
    @require_tf
    def test_tf_bert(self ):
        text_classifier = pipeline("""text-classification""" , framework="""tf""" )
        outputs = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        outputs = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        outputs = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 0.9_88}] )
    def get_test_pipeline(self , model , tokenizer , processor ):
        text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self , text_classifier , _ ):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = """HuggingFace is in"""
        outputs = text_classifier(valid_inputs )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
        valid_inputs = ["""HuggingFace is in """, """Paris is in France"""]
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}, {"""label""": ANY(str ), """score""": ANY(float )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
        self.assertTrue(outputs[1]["""label"""] in model.config.id2label.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs , top_k=None )
        N = len(model.config.id2label.values() )
        self.assertEqual(
            nested_simplify(outputs ) , [[{"""label""": ANY(str ), """score""": ANY(float )}] * N, [{"""label""": ANY(str ), """score""": ANY(float )}] * N] , )
        valid_inputs = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , {"""label""": ANY(str ), """score""": ANY(float )} , )
        self.assertTrue(outputs["""label"""] in model.config.id2label.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(ValueError ):
            text_classifier(invalid_input )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
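# Outside the test harness, the pipeline under test is used like this (a minimal
# sketch; the tiny checkpoint name is the same one the fast tests above rely on):
#     from transformers import pipeline
#     clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
#     print(clf("This is great !"))  # e.g. [{'label': 'LABEL_0', 'score': 0.504}]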
'''simple docstring'''
def is_balanced(s ):
    '''simple docstring'''
    stack = []
    open_brackets = set({"""(""", """[""", """{"""} )
    closed_brackets = set({""")""", """]""", """}"""} )
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
def main():
    '''simple docstring'''
    s = input("""Enter sequence of brackets: """ )
    if is_balanced(s ):
        print(s , """is balanced""" )
    else:
        print(s , """is not balanced""" )
if __name__ == "__main__":
main() | 686 | 0 |
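# Quick sanity checks for the stack-based matcher above:
#     is_balanced("{[()]}")  ->  True
#     is_balanced("{[(])}")  ->  False  (']' arrives while '(' is on top of the stack)
#     is_balanced("((")      ->  False  (unclosed brackets remain on the stack)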
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 1002 )
    def test_vocab_size(self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
    def test_full_tokenizer(self ):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self ):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
    @cached_property
    def big_tokenizer(self ):
        return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
    def test_picklable_without_disk(self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
            pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers(self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    @slow
    def test_tokenization_base_easy_symbols(self ):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols(self ):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration(self ):
        # fmt: off
A_ : Any = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=A_ , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
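# A minimal usage sketch of the slow/fast pair exercised above (network access and
# an installed sentencepiece are assumed):
#     from transformers import XLMRobertaTokenizer
#     tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     print(tok.encode("Hello World!"))  # [0, 35378, 6661, 38, 2]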
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader ):
    def __init__(self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , field = None , num_proc = None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read(self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter:
    def __init__(self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs
    def write(self ):
        _ = self.to_json_kwargs.pop("""path_or_buf""" , None )
        orient = self.to_json_kwargs.pop("""orient""" , """records""" )
        lines = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
        index = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
        compression = self.to_json_kwargs.pop("""compression""" , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , """wb""" , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    """ was passed. Please provide a local path instead.""" )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _batch_json(self , args ):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith("""\n""" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write(self , file_obj , orient , lines , index , **to_json_kwargs ):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_str )
        else:
            num_rows, batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                    written += file_obj.write(json_str )
        return written | 686 | 0 |
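# Usage sketch (illustrative only; the file names and variables below are
# hypothetical, and in practice these classes are usually driven through
# `datasets.load_dataset("json", ...)` and `Dataset.to_json`):
#
#     reader = JsonDatasetReader("train.jsonl", split="train")
#     ds = reader.read()
#     JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()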
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
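# Lazy-import module: `_import_structure` maps submodule names to their public
# symbols; `_LazyModule` (installed below via `sys.modules`) defers the actual
# imports until an attribute is first accessed, keeping package import cheap.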
_import_structure = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = ["""MaskFormerFeatureExtractor"""]
lowerCamelCase :Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_maskformer"""] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
    _import_structure["""modeling_maskformer_swin"""] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 706 |
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test_file = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
blip_test_file = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
    def test_get_test_to_tester_mapping(self ):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file )
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
        expected_blip_tester_mapping = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , expected_bert_tester_mapping )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , expected_blip_tester_mapping )
    def test_get_model_to_test_mapping(self ):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file )
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file )
        expected_bert_model_test_mapping = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
        expected_blip_model_test_mapping = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , expected_bert_model_test_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , expected_blip_model_test_mapping )
    def test_get_model_to_tester_mapping(self ):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file )
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file )
        expected_bert_model_tester_mapping = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
        expected_blip_model_tester_mapping = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , expected_bert_model_tester_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , expected_blip_model_tester_mapping ) | 686 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_albert'''] = ['''AlbertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_albert_fast'''] = ['''AlbertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_albert'''] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_albert'''] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_albert'''] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 707 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
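# `softmax` below is a numerically stable helper; the class implements the four
# `Pipeline` hooks: `_sanitize_parameters` (splits kwargs per stage), `preprocess`
# (tokenization), `_forward` (model call) and `postprocess` (logits -> label/score).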
def softmax( outputs ):
    '''simple docstring'''
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class _lowerCAmelCase ( Pipeline ):
    def _sanitize_parameters(self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["""second_text"""] = kwargs["""second_text"""]
        return preprocess_kwargs, {}, {}
    def preprocess(self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward(self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess(self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits} | 708 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
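# Return True only if every custom file above made it into the built package.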
def test_custom_files_are_present( transformers_path ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''') | 686 | 0 |
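# Usage sketch (illustrative; assumes this script lives at utils/check_build.py
# as in the transformers repo, and is run from the repo root after a build):
#
#     python utils/check_build.py              # checks build/lib/transformers
#     python utils/check_build.py --check_lib  # checks the installed transformers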