| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| string (lengths 82–54.1k) | int64 (0–699) | string (lengths 111–35.6k) | int64 (0–699) | int64 (0–1) |
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 28 |
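A minimal usage sketch for the word-reversal helper above (outputs reasoned by hand):

reverse_words("Hello World")   # -> "World Hello"
reverse_words(" one  two ")    # -> "two one"; split() collapses extra whitespace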
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 1 |
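The `_LazyModule` pattern above defers importing heavy submodules until an attribute is first accessed. A minimal standalone sketch of the same idea, assuming a package layout with one submodule per key (the `LazyModule` class here is illustrative, not the Transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Illustrative stand-in: resolves submodule attributes on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        return getattr(module, attr)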
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """A* search: returns the path from init to goal and the action map."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 28 |
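A quick non-interactive check of `search` on a trivial obstacle-free 2x2 grid, with the result traced by hand (the expansion order makes the search go right first, then down):

small_grid = [[0, 0], [0, 0]]
small_heuristic = [[2, 1], [1, 0]]  # Manhattan distance to the goal (1, 1)
path, _ = search(small_grid, [0, 0], [1, 1], 1, small_heuristic)
print(path)  # [[0, 0], [0, 1], [1, 1]]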
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the position of the highest set bit of a number.

    >>> get_highest_set_bit_position(25)
    5
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 28 | 1 |
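Quick sanity checks (positions are 1-indexed from the least significant bit, and 0 has no set bit):

assert get_highest_set_bit_position(0) == 0
assert get_highest_set_bit_position(1) == 1
assert get_highest_set_bit_position(1024) == 11  # 2**10 -> the eleventh bit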
from math import ceil


def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
| 28 |
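Worked check for a 5x5 spiral: the diagonal values are 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, and the loop reproduces this as 1 + (4*3**2 - 6*2) + (4*5**2 - 6*4):

assert solution(5) == 101
assert solution() == 669171001  # the accepted Project Euler 28 answer for n=1001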
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline: extracts the hidden states from the base
    transformer, which can be used as features in downstream tasks.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (logits or last_hidden_state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 28 | 1 |
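A hedged usage sketch via the high-level `pipeline` factory (the checkpoint name is an illustrative choice; any model with a base encoder works):

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test.")
# nested Python lists shaped [batch, sequence_length, hidden_size]
print(len(features[0]), len(features[0][0]))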
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 28 |
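A quick usage sketch of the two public helpers above on tiny in-memory datasets (with no probabilities, interleaving simply alternates examples):

from datasets import Dataset

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})

print(interleave_datasets([d1, d2])["a"])   # alternates: [0, 10, 1, 11, 2, 12]
print(concatenate_datasets([d1, d2])["a"])  # appends:    [0, 1, 2, 10, 11, 12]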
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree():
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
| 28 | 1 |
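Since `build_tree` is interactive, a small non-interactive example of the traversals above (tree with root 1 and children 2, 3; outputs reasoned by hand):

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
pre_order(root)   # prints: 1,2,3,
print()
in_order(root)    # prints: 2,1,3,
print()
post_order(root)  # prints: 2,3,1,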
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 28 |
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb

from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn

from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class ProcessorGradientFlow:
    """Wraps the CLIP image preprocessing so gradients can flow through it."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Assemble the intermediate PNGs saved during generation into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a transform vector to the base latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image edit driven by the positive and negative prompts."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 28 | 1 |
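A hedged usage sketch for the wrapper above; it assumes the project-local helper modules (`loaders`, `utils`, `img_processing`) and VQGAN weights are available, and the prompt strings and image path are illustrative:

vqgan_clip = VQGAN_CLIP(iterations=50, lr=0.01)
vqgan_clip.generate(
    pos_prompts="a smiling face:1.0 | bright eyes:0.5",
    neg_prompts="a frowning face:1.0",
    image_path="face.png",  # hypothetical input image
    show_intermediate=False,
    save_intermediate=True,
)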
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 28 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    """Wraps several `ControlNetModel`s and sums their conditioning residuals."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
| 28 | 1 |
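A hedged usage sketch: wrapping two pretrained ControlNets so their residuals are summed during the diffusion forward pass (the checkpoint names are illustrative Hub examples):

from diffusers import ControlNetModel

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
multi = MultiControlNetModel([canny, pose])
# per the save loop above, this writes "./my-multi-controlnet" and "./my-multi-controlnet_1"
multi.save_pretrained("./my-multi-controlnet")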
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
| 28 |
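The sample function x**3 - 2*x - 5 has its real root near x = 2.0945515, which is where the secant iteration converges from the starting pair (3, 3.5); the same routine works for any smooth function:

print(intersection(f, 3, 3.5))                     # ~2.0945514
print(intersection(lambda x: x**2 - 2, 1.0, 2.0))  # ~1.4142135, i.e. sqrt(2)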
"""Feature extractor class for TVLT."""

from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 28 | 1 |
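A minimal usage sketch for the extractor above; the time dimension of the output depends on the default hop length, so the exact frame count below is not guaranteed:

import numpy as np

extractor = TvltFeatureExtractor()
waveform = np.zeros(44100, dtype=np.float32)  # one second of silence at 44.1 kHz
inputs = extractor(waveform, sampling_rate=44100, return_tensors="np")
print(inputs["audio_values"].shape)  # (1, 1, time_frames, 128)
print(inputs["audio_mask"].shape)    # (1, num_patches)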
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class _a :
'''simple docstring'''
def __init__( self, A, A=100, A=13, A=30, A=2, A=3, A=True, A=True, A=32, A=4, A=4, A=37, A="gelu", A=0.1, A=0.1, A=10, A=0.02, A=3, A=None, A=[0, 1, 2, 3], ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : List[Any] = 100
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = scope
SCREAMING_SNAKE_CASE : Union[str, Any] = out_indices
SCREAMING_SNAKE_CASE : Tuple = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : Union[str, Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Union[str, Any] = num_patches + 1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=A, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = BeitModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = BeitForMaskedImageModeling(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : str = BeitForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Any = BeitForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : str = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = BeitForSemanticSegmentation(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(A )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE : Dict = model(A, labels=A )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
A : Tuple = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A : Optional[Any] = False
A : Any = False
A : List[str] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BeitModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self, config_class=A, has_text_modality=A, hidden_size=37 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
SCREAMING_SNAKE_CASE : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A, nn.Linear ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(A )
SCREAMING_SNAKE_CASE : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
model.to(A )
model.train()
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(A, A, return_labels=A )
SCREAMING_SNAKE_CASE : int = model(**A ).loss
loss.backward()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE : List[Any] = model_class(A )
model.gradient_checkpointing_enable()
model.to(A )
model.train()
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(A, A, return_labels=A )
SCREAMING_SNAKE_CASE : Dict = model(**A ).loss
loss.backward()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = _config_zero_init(A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = model_class(config=A )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : str = BeitModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Tuple = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).pixel_values.to(A )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(pixel_values=A, bool_masked_pos=A )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(A )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], A, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(A )
SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**A )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : str = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(A )
self.assertTrue(torch.allclose(logits[0, :3], A, atol=1E-4 ) )
SCREAMING_SNAKE_CASE : int = 281
self.assertEqual(logits.argmax(-1 ).item(), A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
A )
SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**A )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(A )
self.assertTrue(torch.allclose(logits[0, :3], A, atol=1E-4 ) )
SCREAMING_SNAKE_CASE : Dict = 2_396
self.assertEqual(logits.argmax(-1 ).item(), A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE : Tuple = model.to(A )
SCREAMING_SNAKE_CASE : int = BeitImageProcessor(do_resize=A, size=640, do_center_crop=A )
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset('hf-internal-testing/fixtures_ade20k', split='test' )
SCREAMING_SNAKE_CASE : str = Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : str = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : str = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
], device=A, )
else:
SCREAMING_SNAKE_CASE : int = torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE : Tuple = model.to(A )
SCREAMING_SNAKE_CASE : Tuple = BeitImageProcessor(do_resize=A, size=640, do_center_crop=A )
SCREAMING_SNAKE_CASE : Any = load_dataset('hf-internal-testing/fixtures_ade20k', split='test' )
SCREAMING_SNAKE_CASE : Optional[int] = Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : List[Any] = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : List[Any] = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, A )
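# A hedged aside on the shapes asserted in the masked-image-modeling test above:
# 196 is presumably the 14 x 14 patch grid of a 224-pixel input with 16-pixel
# patches, and 8192 the dVAE visual-token codebook the pt22k checkpoint predicts
# over. The arithmetic below only sanity-checks that reading; it is not test code.
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2
assert num_patches == 196  # matches torch.Size((1, 196, 8_192)) above
codebook_size = 8_192  # assumed size of the visual-token vocabulary
print((1, num_patches, codebook_size))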
| 28 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = 9, 14 # noqa: F841
SCREAMING_SNAKE_CASE : Optional[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(__UpperCamelCase )
for nodea, nodea, cost in edges:
        adjacency[nodea].append([nodea, cost] )
        adjacency[nodea].append([nodea, cost] )
SCREAMING_SNAKE_CASE : Dict = mst(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
SCREAMING_SNAKE_CASE : Any = tuple(answer[:2] )
SCREAMING_SNAKE_CASE : List[Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
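# The test above imports `prisms_algorithm` without showing it. The following is a
# minimal heap-based sketch of what such a function is assumed to do, given an
# adjacency dict {node: [[neighbor, cost], ...]} like the one built above; it is an
# illustration under those assumptions, not the actual imported implementation.
import heapq


def prims_sketch(adjacency):
    start = next(iter(adjacency))
    visited = {start}
    # heap entries are (cost, u, v) for edges leaving the visited set
    heap = [(cost, start, v) for v, cost in adjacency[start]]
    heapq.heapify(heap)
    mst = []
    while heap and len(visited) < len(adjacency):
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        mst.append([u, v, cost])
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return mst  # 8 edges for the 9-node graph above, e.g. [7, 6, 1], [2, 8, 2], ...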
| 28 | 1 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase_ = "true"
def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: Any=82 ,__UpperCamelCase: Union[str, Any]=16 ):
"""simple docstring"""
set_seed(42 )
SCREAMING_SNAKE_CASE : List[Any] = RegressionModel()
SCREAMING_SNAKE_CASE : List[str] = deepcopy(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = RegressionDataset(length=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase )
model.to(accelerator.device )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return model, ddp_model, dataloader
def lowercase__( __UpperCamelCase: Accelerator ,__UpperCamelCase: Tuple=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset('glue' ,'mrpc' ,split='validation' )
def tokenize_function(__UpperCamelCase: str ):
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,)
SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column('label' ,'labels' )
def collate_fn(__UpperCamelCase: List[Any] ):
if use_longest:
return tokenizer.pad(__UpperCamelCase ,padding='longest' ,return_tensors='pt' )
return tokenizer.pad(__UpperCamelCase ,padding='max_length' ,max_length=1_28 ,return_tensors='pt' )
return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 )
def lowercase__( __UpperCamelCase: Optional[Any] ,__UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_dataloader(__UpperCamelCase ,not dispatch_batches )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' ,return_dict=__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowercase__( __UpperCamelCase: Tuple ,__UpperCamelCase: Optional[Any] ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = []
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = [], []
for logit, targ in logits_and_targets:
logits.append(__UpperCamelCase )
targs.append(__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase )
return logits, targs
def lowercase__( __UpperCamelCase: Accelerator ,__UpperCamelCase: int=82 ,__UpperCamelCase: List[Any]=False ,__UpperCamelCase: Optional[Any]=False ,__UpperCamelCase: str=16 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
assert (
len(__UpperCamelCase ) == num_samples
), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}"
def lowercase__( __UpperCamelCase: bool = False ,__UpperCamelCase: bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load('glue' ,'mrpc' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase )
# First do baseline
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = setup['no']
model.to(__UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(__UpperCamelCase )
with torch.inference_mode():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__UpperCamelCase ,references=batch['labels'] )
SCREAMING_SNAKE_CASE : List[Any] = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE : List[str] = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE : Tuple = batch['labels']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower, so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
test_mrpc(__UpperCamelCase ,__UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE : Dict = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
test_torch_metrics(__UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Accelerator()
test_torch_metrics(__UpperCamelCase ,5_12 )
accelerator.state._reset_state()
def lowercase__( __UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
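# Context for the tests above (a hedged note, not Accelerate internals): distributed
# samplers pad the dataset so every process sees the same number of batches, and
# `gather_for_metrics` drops that padding so metrics see exactly the dataset length.
# The helper below only illustrates the trimming arithmetic; its name is invented.
def trimmed_length(dataset_len: int, num_processes: int, per_process: int) -> int:
    gathered = num_processes * per_process  # what a plain gather would return
    padding = gathered - dataset_len  # duplicated tail samples added by the sampler
    return gathered - padding


assert trimmed_length(82, 2, 41) == 82  # evenly divisible: nothing to trim
assert trimmed_length(99, 4, 25) == 99  # 99 samples padded to 100: 1 duplicate dropped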
| 28 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : int = StableDiffusionDiffEditPipeline
A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A : str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : Union[str, Any] = frozenset([] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears'
SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents
SCREAMING_SNAKE_CASE : str = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
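# The two slow tests above share one structure. A condensed, hedged restatement of
# that DiffEdit workflow (same model ids and arguments as the tests; running it
# requires a GPU and downloads weights, and the scheduler wiring is assumed from
# the pipeline's documented components):
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))
generator = torch.manual_seed(0)

# 1) diff the noise predictions for the two prompts to locate the editable region
mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit",
                          target_prompt="a bowl of pears", generator=generator)
# 2) invert the input image into latents the sampler can rewind from
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image,
                      inpaint_strength=0.7, generator=generator).latents
# 3) denoise with the target prompt, editing only inside the mask
image = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents,
             inpaint_strength=0.7, generator=generator, output_type="numpy").images[0]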
| 28 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
self.test()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[int] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : str = self.advance()
if not self.does_advance(A ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.update(A )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def UpperCamelCase_ ( self ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self, A=False ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super(A, self ).__init__()
if not isinstance(A, A ) or len(A ) == 0:
raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(A, A ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
SCREAMING_SNAKE_CASE : Tuple = token_ids
SCREAMING_SNAKE_CASE : Tuple = len(self.token_ids )
SCREAMING_SNAKE_CASE : Optional[int] = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(A )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(A )}" )
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : int = False
if self.does_advance(A ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : int = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : int = True
self.reset()
return stepped, completed, reset
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Dict = 0
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCamelCase_ ( self, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : List[Any] = self.fulfilled_idx
SCREAMING_SNAKE_CASE : List[Any] = self.completed
return new_constraint
class _a :
'''simple docstring'''
def __init__( self, A, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = max([len(A ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : Optional[int] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Dict = root
for tidx, token_id in enumerate(A ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : Optional[Any] = level[token_id]
if no_subsets and self.has_subsets(A, A ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F" {nested_token_ids}." )
SCREAMING_SNAKE_CASE : Any = root
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : Optional[Any] = start[current_token]
SCREAMING_SNAKE_CASE : Optional[Any] = list(start.keys() )
return next_tokens
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.next_tokens(A )
return len(A ) == 0
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = list(root.values() )
if len(A ) == 0:
return 1
else:
return sum([self.count_leaves(A ) for nn in next_nodes] )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.count_leaves(A )
return len(A ) != leaf_count
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super(A, self ).__init__()
if not isinstance(A, A ) or len(A ) == 0:
raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(A, A ) for token_ids in nested_token_ids ):
raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(A, A ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
SCREAMING_SNAKE_CASE : Dict = DisjunctiveTrie(A )
SCREAMING_SNAKE_CASE : int = nested_token_ids
SCREAMING_SNAKE_CASE : int = self.trie.max_height
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(A ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(A )}" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(A )}" )
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
if self.does_advance(A ):
self.current_seq.append(A )
SCREAMING_SNAKE_CASE : Tuple = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : int = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[str] = completed
return stepped, completed, reset
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCamelCase_ ( self, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Tuple = self.seqlen
SCREAMING_SNAKE_CASE : Dict = self.current_seq
SCREAMING_SNAKE_CASE : str = self.completed
return new_constraint
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : List[str] = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : str = len(A )
SCREAMING_SNAKE_CASE : Any = False
self.init_state()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : str = [constraint.copy(stateful=A ) for constraint in self.constraints]
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : List[str] = constraint.advance()
if isinstance(A, A ):
token_list.append(A )
elif isinstance(A, A ):
token_list.extend(A )
else:
SCREAMING_SNAKE_CASE : List[Any] = self.inprogress_constraint.advance()
if isinstance(A, A ):
token_list.append(A )
elif isinstance(A, A ):
token_list.extend(A )
if len(A ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.add(A )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Dict = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.update(A )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A ) )
SCREAMING_SNAKE_CASE : Dict = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : Any = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pending_constraint.update(A )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(A )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Union[str, Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Any = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : Dict = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCamelCase_ ( self, A=True ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = ConstraintListState(self.constraints ) # we never actually touch the self.constraints objects
        # throughout this process, so they are still in their initialization state.
if stateful:
SCREAMING_SNAKE_CASE : int = [
constraint.copy(stateful=A ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.copy(stateful=A )
SCREAMING_SNAKE_CASE : Optional[Any] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
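# A small usage sketch for the classes above. They appear to be the (obfuscated)
# generation constraints from `transformers`, so the import below assumes that
# correspondence; the printed values follow directly from the update() logic above.
from transformers import PhrasalConstraint

constraint = PhrasalConstraint(token_ids=[5, 7, 9])
print(constraint.advance())  # 5 -- the next token needed to make progress
stepped, completed, reset = constraint.update(5)
print(stepped, completed, reset)  # True False False
constraint.update(7)
stepped, completed, reset = constraint.update(9)
print(completed, constraint.remaining())  # True 0 -- phrase fully matched

# A non-matching token resets the partial match back to the start:
broken = PhrasalConstraint(token_ids=[5, 7, 9])
broken.update(5)
stepped, completed, reset = broken.update(8)
print(stepped, completed, reset)  # False False True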
| 28 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int = 1_00_00_00 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,__UpperCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
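# What the function above computes (a hedged reading): `phi` is a totient sieve --
# phi[i] starts at i - 1, and for each prime p every multiple j loses phi[j] // p --
# so the return value is the sum of Euler's phi(n) for 2 <= n <= limit, i.e. the
# count of reduced proper fractions with denominator at most `limit`. The check
# below verifies the same sieve against a brute-force totient on a small limit.
from math import gcd


def totient_sieve(limit):
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # still untouched, hence i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return phi


small = 50
phi = totient_sieve(small)
for n in range(2, small + 1):
    assert phi[n] == sum(1 for k in range(1, n) if gcd(k, n) == 1)
print(sum(phi[2 : small + 1]))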
| 28 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionXLImgaImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : str = image / 2 + 0.5
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
        # make sure the prompt-embeds path matches the plain-prompt path
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A )
SCREAMING_SNAKE_CASE : str = pipe(**A ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
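# A hedged note on `strength` in the img2img inputs above: in diffusers img2img
# pipelines, strength decides how far into the noise schedule the input image is
# pushed, and therefore how many of the requested steps actually run. The helper
# below mirrors the usual get_timesteps arithmetic; treat the exact rounding as an
# assumption rather than a spec.
def effective_steps(num_inference_steps: int, strength: float) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return num_inference_steps - t_start


print(effective_steps(2, 0.75))  # 1 -- the dummy test above runs a single step
print(effective_steps(50, 0.75))  # 37
print(effective_steps(50, 1.0))  # 50 -- full denoise, the input image is ignored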
| 28 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = LongformerTokenizer
A : List[str] = True
A : Optional[int] = LongformerTokenizerFast
A : Tuple = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
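# Why the vocab above is full of '\u0120'-prefixed tokens: byte-level BPE (GPT-2,
# RoBERTa, Longformer) remaps non-printable bytes to printable characters, and the
# space byte 0x20 lands at code point 0x120 under that remapping, rendered 'Ġ'.
# The check below verifies the mapping; it is background for the add_prefix_space
# behaviour exercised by the tests above.
space_marker = chr(0x20 + 0x100)
assert space_marker == "\u0120" == "Ġ"
print(repr("\u0120lowest"))  # 'Ġlowest' -- "lowest" preceded by a space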
| 28 | 1 |
'''simple docstring'''
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
UpperCamelCase_ = get_logger(__name__)
class _a ( enum.Enum ):
'''simple docstring'''
A : Tuple = '''all_checks'''
A : Dict = '''basic_checks'''
A : List[str] = '''no_checks'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase__( __UpperCamelCase: Optional[dict] ,__UpperCamelCase: dict ,__UpperCamelCase: Any=None ):
"""simple docstring"""
if expected_checksums is None:
logger.info('Unable to verify checksums.' )
return
if len(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) )
if len(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) )
SCREAMING_SNAKE_CASE : int = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
SCREAMING_SNAKE_CASE : List[Any] = ' for ' + verification_name if verification_name is not None else ''
if len(__UpperCamelCase ) > 0:
raise NonMatchingChecksumError(
f"Checksums didn't match{for_verification_name}:\n"
f"{bad_urls}\n"
'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
logger.info('All the checksums matched successfully' + for_verification_name )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase__( __UpperCamelCase: Optional[dict] ,__UpperCamelCase: dict ):
"""simple docstring"""
if expected_splits is None:
logger.info('Unable to verify splits sizes.' )
return
if len(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) > 0:
raise ExpectedMoreSplits(str(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) )
if len(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) > 0:
raise UnexpectedSplits(str(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) )
SCREAMING_SNAKE_CASE : List[Any] = [
{'expected': expected_splits[name], 'recorded': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(__UpperCamelCase ) > 0:
raise NonMatchingSplitsSizesError(str(__UpperCamelCase ) )
logger.info('All the splits matched successfully.' )
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: bool = True ):
"""simple docstring"""
if record_checksum:
SCREAMING_SNAKE_CASE : str = shaaaa()
with open(__UpperCamelCase ,'rb' ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) ,B'' ):
m.update(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = m.hexdigest()
else:
SCREAMING_SNAKE_CASE : int = None
return {"num_bytes": os.path.getsize(__UpperCamelCase ), "checksum": checksum}
def lowercase__( __UpperCamelCase: Optional[int] ):
"""simple docstring"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
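# Minimal sketch (not part of the module) of the verification contract above:
# both mappings go {url: checksum_info}; a key mismatch raises in either
# direction, and any per-url value mismatch is reported in bulk.
def _demo_verify(expected: dict, recorded: dict ):
    if set(expected ) - set(recorded ):
        raise ValueError(f'missing files: {set(expected ) - set(recorded )}' )
    if set(recorded ) - set(expected ):
        raise ValueError(f'unexpected files: {set(recorded ) - set(expected )}' )
    bad_urls = [url for url in expected if expected[url] != recorded[url]]
    if bad_urls:
        raise ValueError(f"checksums didn't match: {bad_urls}" )

_demo_verify({'url/a': 'abc'}, {'url/a': 'abc'} )  # passes silently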
| 28 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionXLImgaImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : str = image / 2 + 0.5
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A )
SCREAMING_SNAKE_CASE : str = pipe(**A ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
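# Sketch (illustrative) of the device-aware seeding pattern used by
# get_dummy_inputs above: MPS does not support per-device torch.Generator
# objects, so the test falls back to the global CPU generator there.
import torch

def _make_generator(device: str, seed: int ):
    if str(device ).startswith('mps' ):
        return torch.manual_seed(seed )  # global generator fallback for MPS
    return torch.Generator(device=device ).manual_seed(seed )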
| 28 | 1 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ):
"""simple docstring"""
while a != 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = b % a, a
return b
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ):
"""simple docstring"""
if gcd(__UpperCamelCase ,__UpperCamelCase ) != 1:
SCREAMING_SNAKE_CASE : Tuple = f"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = 1, 0, a
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = 0, 1, m
while va != 0:
SCREAMING_SNAKE_CASE : Optional[Any] = ua // va
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
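# Worked example (illustrative): the loop above is the iterative extended
# Euclidean algorithm, so for a = 3, m = 7 it returns 5, since 3 * 5 = 15 = 1 (mod 7).
def _demo_mod_inverse(a: int, m: int ) -> int:
    ua, va = 1, 0
    ra, rb = a, m
    while rb != 0:
        q = ra // rb
        ua, va = va, ua - q * va
        ra, rb = rb, ra - q * rb
    if ra != 1:  # ra holds gcd(a, m) once the loop ends
        raise ValueError(f'mod inverse of {a!r} and {m!r} does not exist' )
    return ua % m

assert _demo_mod_inverse(3, 7 ) == 5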
| 28 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Dict = '''char'''
A : Any = '''bpe'''
A : Dict = '''wp'''
UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''image_processor''', '''char_tokenizer''']
A : int = '''ViTImageProcessor'''
A : List[str] = '''MgpstrTokenizer'''
def __init__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', A, )
SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(A, A )
def __call__( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A )
if text is not None:
SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE : Any = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences
SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' )
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(A ):
SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = final_strs
SCREAMING_SNAKE_CASE : Any = final_scores
SCREAMING_SNAKE_CASE : Dict = char_strs
SCREAMING_SNAKE_CASE : Any = bpe_strs
SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs
return out
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
SCREAMING_SNAKE_CASE : List[Any] = self.char_decode
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : str = '[s]'
elif format == DecodeType.BPE:
SCREAMING_SNAKE_CASE : str = self.bpe_decode
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = '#'
elif format == DecodeType.WORDPIECE:
SCREAMING_SNAKE_CASE : Any = self.wp_decode
SCREAMING_SNAKE_CASE : Tuple = 102
SCREAMING_SNAKE_CASE : List[Any] = '[SEP]'
else:
raise ValueError(F"Format {format} is not supported." )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], []
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 )
SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A )
SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:]
SCREAMING_SNAKE_CASE : List[Any] = decoder(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 )
SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:]
for index in range(A ):
SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A )
SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos]
SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1
SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(A )
conf_scores.append(A )
return dec_strs, conf_scores
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )]
return decode_strs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )]
return decode_strs
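# Illustrative sketch of the fusion step in batch_decode above: for each sample
# the processor keeps whichever decoding head (char / bpe / wordpiece) produced
# the highest cumulative-softmax confidence.
def _demo_fuse(candidates ):
    # candidates: list of (decoded_string, confidence) pairs, one per head
    strs, scores = zip(*candidates )
    best = scores.index(max(scores ) )
    return strs[best], scores[best]

assert _demo_fuse([('ticket', 0.91 ), ('ticke#', 0.40 ), ('ticket', 0.88 )] ) == ('ticket', 0.91 )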
| 28 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
UpperCamelCase_ = trt.Logger(trt.Logger.WARNING)
UpperCamelCase_ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=3_8_4,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=1_2_8,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=2_0,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=3_0,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=4_2, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
UpperCamelCase_ = parser.parse_args()
if args.tokenizer_name:
UpperCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
UpperCamelCase_ = args.per_device_eval_batch_size
UpperCamelCase_ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
UpperCamelCase_ = True
UpperCamelCase_ = "temp_engine/bert-fp32.engine"
if args.fpaa:
UpperCamelCase_ = "temp_engine/bert-fp16.engine"
if args.inta:
UpperCamelCase_ = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
UpperCamelCase_ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
UpperCamelCase_ = [network.get_input(i) for i in range(network.num_inputs)]
UpperCamelCase_ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
UpperCamelCase_ = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
UpperCamelCase_ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
UpperCamelCase_ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def lowercase__( __UpperCamelCase: Optional[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ,__UpperCamelCase: Tuple ,__UpperCamelCase: List[str] ,__UpperCamelCase: List[str] ,__UpperCamelCase: Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = np.asarray(inputs['input_ids'] ,dtype=np.intaa )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(inputs['attention_mask'] ,dtype=np.intaa )
SCREAMING_SNAKE_CASE : Dict = np.asarray(inputs['token_type_ids'] ,dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] ,input_ids.ravel() ,__UpperCamelCase )
cuda.memcpy_htod_async(d_inputs[1] ,attention_mask.ravel() ,__UpperCamelCase )
cuda.memcpy_htod_async(d_inputs[2] ,token_type_ids.ravel() ,__UpperCamelCase )
# start time
SCREAMING_SNAKE_CASE : Optional[int] = time.time()
# Run inference
context.execute_async(
bindings=[int(__UpperCamelCase ) for d_inp in d_inputs] + [int(__UpperCamelCase ), int(__UpperCamelCase )] ,stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
cuda.memcpy_dtoh_async(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
SCREAMING_SNAKE_CASE : List[Any] = time.time()
SCREAMING_SNAKE_CASE : Tuple = end_time - start_time
SCREAMING_SNAKE_CASE : Optional[int] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
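# The helper above follows the standard pycuda double-buffered flow:
#   1. memcpy_htod_async copies the three int32 input arrays to the device bindings
#   2. execute_async enqueues the TensorRT engine on the CUDA stream
#   3. memcpy_dtoh_async copies the start/end logits back into pagelocked host buffers
#   4. stream.synchronize() blocks until the stream drains, bounding the timed window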
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
UpperCamelCase_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCamelCase_ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
UpperCamelCase_ = raw_datasets["validation"].column_names
UpperCamelCase_ = "question" if "question" in column_names else column_names[0]
UpperCamelCase_ = "context" if "context" in column_names else column_names[1]
UpperCamelCase_ = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
UpperCamelCase_ = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
UpperCamelCase_ = min(args.max_seq_length, tokenizer.model_max_length)
def lowercase__( __UpperCamelCase: List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
SCREAMING_SNAKE_CASE : List[str] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] ,examples[context_column_name if pad_on_right else question_column_name] ,truncation='only_second' if pad_on_right else 'only_first' ,max_length=__UpperCamelCase ,stride=args.doc_stride ,return_overflowing_tokens=__UpperCamelCase ,return_offsets_mapping=__UpperCamelCase ,padding='max_length' ,)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
SCREAMING_SNAKE_CASE : Tuple = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
SCREAMING_SNAKE_CASE : List[str] = []
for i in range(len(tokenized_examples['input_ids'] ) ):
        # Grab the sequence corresponding to that example (to know which part is the context and which is the question).
SCREAMING_SNAKE_CASE : Optional[int] = tokenized_examples.sequence_ids(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
SCREAMING_SNAKE_CASE : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
SCREAMING_SNAKE_CASE : List[str] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
UpperCamelCase_ = raw_datasets["validation"]
# Validation Feature Creation
UpperCamelCase_ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
UpperCamelCase_ = default_data_collator
UpperCamelCase_ = eval_dataset.remove_columns(["example_id", "offset_mapping"])
UpperCamelCase_ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Dict ,__UpperCamelCase: Any ,__UpperCamelCase: Dict="eval" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = postprocess_qa_predictions(
examples=__UpperCamelCase ,features=__UpperCamelCase ,predictions=__UpperCamelCase ,version_2_with_negative=args.version_2_with_negative ,n_best_size=args.n_best_size ,max_answer_length=args.max_answer_length ,null_score_diff_threshold=args.null_score_diff_threshold ,output_dir=args.output_dir ,prefix=__UpperCamelCase ,)
# Format the result to the format the metric expects.
if args.version_2_with_negative:
SCREAMING_SNAKE_CASE : List[Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
SCREAMING_SNAKE_CASE : Optional[int] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
SCREAMING_SNAKE_CASE : Any = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=__UpperCamelCase ,label_ids=__UpperCamelCase )
UpperCamelCase_ = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # set up for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowercase__( __UpperCamelCase: Any ):
"""simple docstring"""
return trt.volume(engine.get_binding_shape(__UpperCamelCase ) ) * engine.get_binding_dtype(__UpperCamelCase ).itemsize
# Allocate device memory for inputs and outputs.
UpperCamelCase_ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
UpperCamelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
UpperCamelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
UpperCamelCase_ = cuda.mem_alloc(h_outputa.nbytes)
UpperCamelCase_ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
UpperCamelCase_ = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
UpperCamelCase_ = 0.0
UpperCamelCase_ = 0
UpperCamelCase_ = timeit.default_timer()
UpperCamelCase_ = None
for step, batch in enumerate(eval_dataloader):
UpperCamelCase_ , UpperCamelCase_ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
UpperCamelCase_ , UpperCamelCase_ = outputs
UpperCamelCase_ = torch.tensor(start_logits)
UpperCamelCase_ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
UpperCamelCase_ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
UpperCamelCase_ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
UpperCamelCase_ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
UpperCamelCase_ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
UpperCamelCase_ = nested_truncate(all_preds, len(eval_dataset))
UpperCamelCase_ = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1_0_0_0 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1_0_0_0))
logger.info("Total Number of Inference = %d", niter)
UpperCamelCase_ = post_processing_function(eval_examples, eval_dataset, all_preds)
UpperCamelCase_ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 28 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ):
"""simple docstring"""
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float()
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
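# Sketch (assumption about the checkpoint layout): HiFi-GAN checkpoints store
# weight-normalized convolutions as weight_g / weight_v pairs, which is why the
# loader above calls apply_weight_norm() before copying tensors and
# remove_weight_norm() afterwards to fold the pair back into a single weight.
def _demo_weight_norm_roundtrip():
    import torch
    from torch.nn.utils import weight_norm, remove_weight_norm

    conv = weight_norm(torch.nn.Conv1d(4, 4, 3 ) )  # adds conv.weight_g / conv.weight_v
    conv.weight_g.data.fill_(1.0 )                  # stand-in for a checkpoint tensor
    remove_weight_norm(conv )                       # folds g * v / |v| back into conv.weight
    return conv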
| 28 | 1 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_1_2,
"facebook/dpr-ctx_encoder-multiset-base": 5_1_2,
}
UpperCamelCase_ = {
"facebook/dpr-question_encoder-single-nq-base": 5_1_2,
"facebook/dpr-question_encoder-multiset-base": 5_1_2,
}
UpperCamelCase_ = {
"facebook/dpr-reader-single-nq-base": 5_1_2,
"facebook/dpr-reader-multiset-base": 5_1_2,
}
UpperCamelCase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
UpperCamelCase_ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
UpperCamelCase_ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = VOCAB_FILES_NAMES
A : Tuple = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Dict = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
A : Union[str, Any] = DPRContextEncoderTokenizer
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Optional[int] = VOCAB_FILES_NAMES
A : str = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[str] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A : List[Any] = DPRQuestionEncoderTokenizer
UpperCamelCase_ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
UpperCamelCase_ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCamelCase_ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(SCREAMING_SNAKE_CASE )
class _a :
'''simple docstring'''
def __call__( self, A, A = None, A = None, A = False, A = False, A = None, A = None, A = None, **A, ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
A, padding=A, truncation=A, max_length=A, return_tensors=A, return_attention_mask=A, **A, )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE : int = titles if texts is None else texts
return super().__call__(
A, A, padding=A, truncation=A, max_length=A, return_tensors=A, return_attention_mask=A, **A, )
SCREAMING_SNAKE_CASE : Dict = titles if not isinstance(A, A ) else [titles]
SCREAMING_SNAKE_CASE : int = texts if not isinstance(A, A ) else [texts]
SCREAMING_SNAKE_CASE : Union[str, Any] = len(A )
SCREAMING_SNAKE_CASE : Tuple = questions if not isinstance(A, A ) else [questions] * n_passages
        assert len(A ) == len(
            A ), F"There should be as many titles as texts, but got {len(A )} titles and {len(A )} texts."
SCREAMING_SNAKE_CASE : str = super().__call__(A, A, padding=A, truncation=A )['input_ids']
SCREAMING_SNAKE_CASE : int = super().__call__(A, add_special_tokens=A, padding=A, truncation=A )['input_ids']
SCREAMING_SNAKE_CASE : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(A, A )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE : Optional[Any] = attention_mask
return self.pad(A, padding=A, max_length=A, return_tensors=A )
def UpperCamelCase_ ( self, A, A, A = 16, A = 64, A = 4, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = reader_input['input_ids']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = reader_output[:3]
SCREAMING_SNAKE_CASE : Optional[int] = len(A )
SCREAMING_SNAKE_CASE : List[str] = sorted(range(A ), reverse=A, key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE : List[Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE : Dict = sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE : Optional[Any] = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = len(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=A, top_spans=A, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=A, start_index=A, end_index=A, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase_ ( self, A, A, A, A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
for start_index, start_score in enumerate(A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE : List[Any] = sorted(A, key=lambda A : x[1], reverse=A )
SCREAMING_SNAKE_CASE : str = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"Wrong span indices: [{start_index}:{end_index}]"
SCREAMING_SNAKE_CASE : Dict = end_index - start_index + 1
assert length <= max_answer_length, F"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = VOCAB_FILES_NAMES
A : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
A : Dict = ['''input_ids''', '''attention_mask''']
A : str = DPRReaderTokenizer
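# Toy sketch of the span search in _get_best_spans above: score every
# (start, end) window of length <= max_answer_length by start_logit + end_logit,
# then greedily keep the best non-overlapping spans.
def _demo_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=1 ):
    scores = []
    for s, s_score in enumerate(start_logits ):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length] ):
            scores.append(((s, s + length), s_score + e_score ) )
    scores.sort(key=lambda x: x[1], reverse=True )
    chosen = []
    for (s, e), _ in scores:
        if any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen ):
            continue  # overlaps a span that was already kept
        chosen.append((s, e ) )
        if len(chosen ) == top_spans:
            break
    return chosen

assert _demo_best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5] ) == [(1, 2 )]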
| 28 |
'''simple docstring'''
from typing import Any
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = data
SCREAMING_SNAKE_CASE : Any = None
def __repr__( self ):
'''simple docstring'''
return F"Node({self.data})"
class _a :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = None
def __iter__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
while node:
yield node.data
SCREAMING_SNAKE_CASE : List[str] = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(A ) for item in self] )
def __getitem__( self, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self, A, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
for _ in range(A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = current.next
SCREAMING_SNAKE_CASE : Any = data
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(len(self ), A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(0, A )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A )
if self.head is None:
SCREAMING_SNAKE_CASE : Optional[int] = new_node
elif index == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head
SCREAMING_SNAKE_CASE : Tuple = new_node
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : str = temp.next
SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next
SCREAMING_SNAKE_CASE : List[str] = new_node
def UpperCamelCase_ ( self ): # print every node data
'''simple docstring'''
print(self )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def UpperCamelCase_ ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase_ ( self, A = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node
if index == 0:
SCREAMING_SNAKE_CASE : List[str] = self.head.next
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : Any = temp.next
SCREAMING_SNAKE_CASE : List[str] = temp.next
SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next
return delete_node.data
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.head is None
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Any = self.head
while current:
# Store the current node's next node.
SCREAMING_SNAKE_CASE : Optional[int] = current.next
# Make the current node's next point backwards
SCREAMING_SNAKE_CASE : int = prev
# Make the previous node be the current node
SCREAMING_SNAKE_CASE : int = current
# Make the current node the next node (to progress iteration)
SCREAMING_SNAKE_CASE : List[Any] = next_node
# Return prev in order to put the head at the end
SCREAMING_SNAKE_CASE : List[Any] = prev
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__UpperCamelCase ) == i
linked_list.insert_nth(__UpperCamelCase ,i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__UpperCamelCase ) == 9
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
for i in range(0 ,9 ):
SCREAMING_SNAKE_CASE : Any = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
SCREAMING_SNAKE_CASE : Optional[int] = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
SCREAMING_SNAKE_CASE : str = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCamelCase )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowercase__( ):
"""simple docstring"""
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE : Dict = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(__UpperCamelCase )
print('\nReading/changing Node data using indexing:' )
print(f"Element at Position 1: {linked_list[1]}" )
SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip()
print('New list:' )
print(__UpperCamelCase )
print(f"length of linked_list is : {len(__UpperCamelCase )}" )
if __name__ == "__main__":
main()
| 28 | 1 |
'''simple docstring'''
from typing import Any
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = data
SCREAMING_SNAKE_CASE : Any = None
def __repr__( self ):
'''simple docstring'''
return F"Node({self.data})"
class _a :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = None
def __iter__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
while node:
yield node.data
SCREAMING_SNAKE_CASE : List[str] = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(A ) for item in self] )
def __getitem__( self, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self, A, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
for _ in range(A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = current.next
SCREAMING_SNAKE_CASE : Any = data
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(len(self ), A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(0, A )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A )
if self.head is None:
SCREAMING_SNAKE_CASE : Optional[int] = new_node
elif index == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head
SCREAMING_SNAKE_CASE : Tuple = new_node
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : str = temp.next
SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next
SCREAMING_SNAKE_CASE : List[str] = new_node
def UpperCamelCase_ ( self ): # print every node data
'''simple docstring'''
print(self )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def UpperCamelCase_ ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase_ ( self, A = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node
if index == 0:
SCREAMING_SNAKE_CASE : List[str] = self.head.next
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : Any = temp.next
SCREAMING_SNAKE_CASE : List[str] = temp.next
SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next
return delete_node.data
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.head is None
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Any = self.head
while current:
# Store the current node's next node.
SCREAMING_SNAKE_CASE : Optional[int] = current.next
# Make the current node's next point backwards
SCREAMING_SNAKE_CASE : int = prev
# Make the previous node be the current node
SCREAMING_SNAKE_CASE : int = current
# Make the current node the next node (to progress iteration)
SCREAMING_SNAKE_CASE : List[Any] = next_node
# After the loop, prev is the old tail, i.e. the new head
SCREAMING_SNAKE_CASE : List[Any] = prev
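# Editor's sketch (not part of the original file): the loop above is the
# standard three-pointer iterative reversal. A standalone equivalent, assuming
# nodes expose (data, next) attributes:
#
# def reverse_list(head):
#     prev = None
#     while head:
#         # tuple assignment evaluates the right side first, so this
#         # re-points head.next backwards while advancing prev and head
#         head.next, prev, head = prev, head, head.next
#     return prev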
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__UpperCamelCase ) == i
linked_list.insert_nth(__UpperCamelCase ,i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(i ) for i in range(1 ,11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__UpperCamelCase ) == "->".join(str(i ) for i in range(0 ,12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__UpperCamelCase ) == 9
assert str(__UpperCamelCase ) == "->".join(str(i ) for i in range(1 ,10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
for i in range(0 ,9 ):
SCREAMING_SNAKE_CASE : Any = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(__UpperCamelCase ) == "->".join(str(i ) for i in range(-8 ,1 ) )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
SCREAMING_SNAKE_CASE : Optional[int] = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
SCREAMING_SNAKE_CASE : str = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node at a specific location in the linked list
SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCamelCase )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowercase__( ):
"""simple docstring"""
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE : Dict = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(__UpperCamelCase )
print('\nReading/changing Node data using indexing:' )
print(f"Element at Position 1: {linked_list[1]}" )
SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip()
print('New list:' )
print(__UpperCamelCase )
print(f"length of linked_list is : {len(__UpperCamelCase )}" )
if __name__ == "__main__":
main()
| 28 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Tuple = min_resolution
SCREAMING_SNAKE_CASE : int = max_resolution
SCREAMING_SNAKE_CASE : Tuple = do_resize
SCREAMING_SNAKE_CASE : Tuple = size
SCREAMING_SNAKE_CASE : Any = do_normalize
SCREAMING_SNAKE_CASE : Optional[int] = image_mean
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std
SCREAMING_SNAKE_CASE : Optional[int] = do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor
SCREAMING_SNAKE_CASE : List[str] = do_pad
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self, A, A=False ):
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0]
if isinstance(A, Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0]
SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1]
return expected_height, expected_width
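# Worked example of the shortest-edge logic above (editor's note): with
# size = {'shortest_edge': 18} and an input of height 40 x width 30 (w < h),
# the expected output is height int(18 * 40 / 30) == 24 and width 18.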
@require_torch
@require_vision
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A, 'image_mean' ) )
self.assertTrue(hasattr(A, 'image_std' ) )
self.assertTrue(hasattr(A, 'do_normalize' ) )
self.assertTrue(hasattr(A, 'do_resize' ) )
self.assertTrue(hasattr(A, 'size' ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad, A )
SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A, Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A )
SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A )
for image in image_inputs:
self.assertIsInstance(A, np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test that the "pad" method and calling the image processor directly return the same tensors
SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : int = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[int] = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
| 28 | 1 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
for i in range(1 ,num + 1 ):
fact *= i
return fact
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 0
while number > 0:
SCREAMING_SNAKE_CASE : Optional[Any] = number % 10
sum_of_digits += last_digit
SCREAMING_SNAKE_CASE : Optional[Any] = number // 10 # Removing the last_digit from the given number
return sum_of_digits
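# Quick sanity check for the two helpers above (editor's note):
# factorial(10) == 3_628_800, whose digit sum is 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.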
def lowercase__( __UpperCamelCase: int = 1_00 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = factorial(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Tuple = split_and_add(__UpperCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 28 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)
def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
else:
return _interleave_iterable_datasets(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,):
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
else:
return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
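# Illustrative usage (editor's sketch; assumes the public names
# `interleave_datasets` / `concatenate_datasets` that this module backs):
#
# from datasets import Dataset
# d1 = Dataset.from_dict({'a': [0, 1, 2]})
# d2 = Dataset.from_dict({'a': [10, 11, 12]})
# interleave_datasets([d1, d2])['a']   # [0, 10, 1, 11, 2, 12]
# concatenate_datasets([d1, d2])['a']  # [0, 1, 2, 10, 11, 12]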
| 28 | 1 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not number >= 1:
raise ValueError(
'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
SCREAMING_SNAKE_CASE : Optional[Any] = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(number )
# print(out)
number += 1
out += " "
return out
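# Worked example (editor's note): with number=1 and iterations=7 the function
# above returns '1 2 Fizz 4 Buzz Fizz 7 ' (note the trailing space appended
# after every iteration).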
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) )
class _a :
'''simple docstring'''
def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : int = last_hidden_size
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size
SCREAMING_SNAKE_CASE : Optional[Any] = output_stride
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
SCREAMING_SNAKE_CASE : int = model(A, labels=A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
A : List[Any] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A : Optional[int] = False
A : Dict = False
A : List[Any] = False
A : Optional[int] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = 5
self.assertEqual(len(A ), A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : int = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(A, A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[str] = model.to(A )
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, A )
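# Editor's note: as exercised above, post_process_semantic_segmentation
# resizes predictions to `target_sizes` when provided ((50, 60) here) and
# otherwise returns them at the logits' own spatial resolution ((32, 32) here).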
| 28 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ).convert('RGB' )
return image
def lowercase__( __UpperCamelCase: Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
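# Example of one (src, dest) pair appended above for i == 0 (editor's note):
# ('visual_encoder.blocks.0.norm1.weight',
#  'vision_model.encoder.layers.0.layer_norm1.weight')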
def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: int ,__UpperCamelCase: Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = dct.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = val
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat((q_bias, torch.zeros_like(__UpperCamelCase ,requires_grad=__UpperCamelCase ), v_bias) )
SCREAMING_SNAKE_CASE : Union[str, Any] = qkv_bias
def lowercase__( __UpperCamelCase: Tuple ,__UpperCamelCase: List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = 3_64 if 'coco' in model_name else 2_24
SCREAMING_SNAKE_CASE : Dict = BlipaVisionConfig(image_size=__UpperCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# flan-T5 checkpoints don't seem to have bos_token_id properly set
if "opt-2.7b" in model_name:
SCREAMING_SNAKE_CASE : Optional[int] = OPTConfig.from_pretrained('facebook/opt-2.7b' ,eos_token_id=__UpperCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
SCREAMING_SNAKE_CASE : List[str] = OPTConfig.from_pretrained('facebook/opt-6.7b' ,eos_token_id=__UpperCamelCase ).to_dict()
elif "t5-xl" in model_name:
SCREAMING_SNAKE_CASE : int = TaConfig.from_pretrained('google/flan-t5-xl' ,dense_act_fn='gelu' ,bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
SCREAMING_SNAKE_CASE : Any = TaConfig.from_pretrained('google/flan-t5-xxl' ,dense_act_fn='gelu' ,bos_token_id=1 ).to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = BlipaConfig(vision_config=__UpperCamelCase ,text_config=__UpperCamelCase )
return config, image_size
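# Example (derived from the branches above): get_blipa_config('blip2-opt-2.7b')
# selects the facebook/opt-2.7b text backbone and, since 'coco' is absent from
# the name, image_size == 224 ('-coco' checkpoints use 364).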
@torch.no_grad()
def lowercase__( __UpperCamelCase: Tuple ,__UpperCamelCase: Union[str, Any]=None ,__UpperCamelCase: Dict=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer('\n' ,add_special_tokens=__UpperCamelCase ).input_ids[0]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = get_blipa_config(__UpperCamelCase ,eos_token_id=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = BlipaForConditionalGeneration(__UpperCamelCase ).eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
SCREAMING_SNAKE_CASE : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = load_model_and_preprocess(
name=__UpperCamelCase ,model_type=__UpperCamelCase ,is_eval=__UpperCamelCase ,device=__UpperCamelCase )
original_model.eval()
print('Done!' )
# update state dict keys
SCREAMING_SNAKE_CASE : List[str] = original_model.state_dict()
SCREAMING_SNAKE_CASE : Tuple = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# rename the remaining keys by substring replacement
for key, val in state_dict.copy().items():
SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(__UpperCamelCase )
if key.startswith('Qformer.bert' ):
SCREAMING_SNAKE_CASE : Any = key.replace('Qformer.bert' ,'qformer' )
if "attention.self" in key:
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('self' ,'attention' )
if "opt_proj" in key:
SCREAMING_SNAKE_CASE : Any = key.replace('opt_proj' ,'language_projection' )
if "t5_proj" in key:
SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('t5_proj' ,'language_projection' )
if key.startswith('opt' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('opt' ,'language' )
if key.startswith('t5' ):
SCREAMING_SNAKE_CASE : Optional[int] = key.replace('t5' ,'language' )
SCREAMING_SNAKE_CASE : Optional[int] = val
# read in qv biases
read_in_q_v_bias(__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = hf_model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase )
assert len(__UpperCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
SCREAMING_SNAKE_CASE : str = load_demo_image()
SCREAMING_SNAKE_CASE : Any = vis_processors['eval'](__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = tokenizer(['\n'] ,return_tensors='pt' ).input_ids.to(__UpperCamelCase )
# create processor
SCREAMING_SNAKE_CASE : Optional[Any] = BlipImageProcessor(
size={'height': image_size, 'width': image_size} ,image_mean=__UpperCamelCase ,image_std=__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = BlipaProcessor(image_processor=__UpperCamelCase ,tokenizer=__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[str] = processor(images=__UpperCamelCase ,return_tensors='pt' ).pixel_values.to(__UpperCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(__UpperCamelCase ,__UpperCamelCase )
original_model.to(__UpperCamelCase )
hf_model.to(__UpperCamelCase )
with torch.no_grad():
if "opt" in model_name:
SCREAMING_SNAKE_CASE : Any = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
SCREAMING_SNAKE_CASE : Dict = hf_model(__UpperCamelCase ,__UpperCamelCase ).logits
else:
SCREAMING_SNAKE_CASE : Tuple = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
SCREAMING_SNAKE_CASE : Dict = input_ids.masked_fill(input_ids == tokenizer.pad_token_id ,-1_00 )
SCREAMING_SNAKE_CASE : Tuple = hf_model(__UpperCamelCase ,__UpperCamelCase ,labels=__UpperCamelCase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' ,original_logits[0, :3, :3] )
print('First values of HF logits:' ,logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
SCREAMING_SNAKE_CASE : int = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] ,device=__UpperCamelCase )
assert torch.allclose(logits[0, :3, :3] ,__UpperCamelCase ,atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] ,device=__UpperCamelCase )
else:
# cast to same type
SCREAMING_SNAKE_CASE : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(__UpperCamelCase ) ,__UpperCamelCase ,atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
SCREAMING_SNAKE_CASE : Optional[Any] = ''
SCREAMING_SNAKE_CASE : str = tokenizer(__UpperCamelCase ,return_tensors='pt' ).input_ids.to(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = original_model.generate({'image': original_pixel_values} )
SCREAMING_SNAKE_CASE : Tuple = hf_model.generate(
__UpperCamelCase ,__UpperCamelCase ,do_sample=__UpperCamelCase ,num_beams=5 ,max_length=30 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.0 ,length_penalty=1.0 ,temperature=1 ,)
print('Original generation:' ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids.shape[1]
SCREAMING_SNAKE_CASE : Dict = processor.batch_decode(outputs[:, prompt_length:] ,skip_special_tokens=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = [text.strip() for text in output_text]
print('HF generation:' ,__UpperCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if push_to_hub:
processor.push_to_hub(f"nielsr/{model_name}" )
hf_model.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
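# Example invocation (editor's sketch; the script filename and dump path are
# illustrative):
#
# python convert_blip_2_original_to_pytorch.py \
#     --model_name blip2-opt-2.7b \
#     --pytorch_dump_folder_path ./blip2-opt-2.7b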
| 28 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
UpperCamelCase_ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A : Optional[int] = ['''input_ids''', '''attention_mask''']
A : List[Any] = DistilBertTokenizer
def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ):
'''simple docstring'''
super().__init__(
A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase', A ) != do_lower_case
or normalizer_state.get('strip_accents', A ) != strip_accents
or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE : List[str] = strip_accents
SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case
def UpperCamelCase_ ( self, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
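# Illustrative layouts for the two helpers above (editor's note; letters stand
# for token ids):
# single sequence: [CLS] A A [SEP]          -> token_type_ids [0, 0, 0, 0]
# sequence pair:   [CLS] A A [SEP] B [SEP]  -> token_type_ids [0, 0, 0, 0, 1, 1]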
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A )
return tuple(A )
| 28 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
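# Hedged, minimal sketch (not from the source) of the lazy-import pattern above: a
# module-like object that defers the real import until an attribute is first touched.
# Module and attribute names here are illustrative.
import importlib
import types


class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so __getattr__ is only hit once per name
        return value


# "json" is imported only when `lazy.dumps` is first accessed.
lazy = _DemoLazyModule("lazy_demo", {"dumps": "json", "loads": "json"})
assert lazy.dumps({"ok": True}) == '{"ok": true}'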
| 28 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase_ = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
'''simple docstring'''
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        image_processor = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock ) as mock_head:
            image_processor = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
        # This check verifies that we did call the fake head request.
        mock_head.assert_called()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )
self.assertIsNotNone(A )
@is_staging_test
class _a ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(
F"{USER}/test-dynamic-image-processor", trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
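# Hedged sketch (not in the source) of the save/load round-trip these tests build on,
# using a local temporary directory instead of the Hub (no token or network needed).
# `tempfile` and `ViTImageProcessor` are already imported at the top of this file.
def _demo_image_processor_roundtrip():
    with tempfile.TemporaryDirectory() as tmp_dir:
        processor = ViTImageProcessor()      # default configuration
        processor.save_pretrained(tmp_dir)   # writes preprocessor_config.json
        reloaded = ViTImageProcessor.from_pretrained(tmp_dir)
        assert processor.to_dict() == reloaded.to_dict()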
| 28 | 1 |
'''simple docstring'''
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def lowercase__( ):
"""simple docstring"""
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowercase__( ):
"""simple docstring"""
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: Dict ,__UpperCamelCase: Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset_loading_script_name
SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = script_dir / f"{script_name}.py"
with open(__UpperCamelCase ,'w' ) as f:
f.write(__UpperCamelCase )
return str(__UpperCamelCase )
| 28 |
'''simple docstring'''
class Node:
    '''simple docstring'''

    def __init__(self, val):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """simple docstring"""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """simple docstring"""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
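# Hedged usage notes (not from the source): because `insert` overwrites on equality,
# tree_sort also de-duplicates, and a pre-sorted input degenerates the BST to O(n^2).
assert tree_sort([3, 1, 2]) == [1, 2, 3]
assert tree_sort([2, 2, 1]) == [1, 2]  # the duplicate 2 is collapsed by the BST insert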
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
| 28 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """simple docstring"""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
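# Hedged usage sketch (the function and kwarg names below are illustrative, not part
# of the original file): honour a deprecated keyword argument while warning callers
# that it will go away in a future version.
def _demo_scale(values, factor=None, **kwargs):
    old = deprecate('scale_factor', '999.0.0', 'Use `factor` instead.', take_from=kwargs)
    if factor is None:
        factor = old
    return [v * factor for v in values]

# _demo_scale([1, 2], scale_factor=3) -> [3, 6], and emits a FutureWarning.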
| 28 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( A ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self ):
'''simple docstring'''
raise NotImplementedError()
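# Hedged sketch (not from the source) of a concrete subclass: register_subcommand
# receives an argparse sub-parsers object and wires the command in; run() does the work.
class _DemoHelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser('hello')
        sub.add_argument('--name', default='world')
        sub.set_defaults(func=lambda args: _DemoHelloCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"hello {self.name}")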
| 28 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 1 |
'''simple docstring'''
import math
class Graph:
    '''simple docstring'''

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        '''simple docstring'''
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        '''simple docstring'''
        self.dp[u][v] = w

    def floyd_warshall(self):
        '''simple docstring'''
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        '''simple docstring'''
        return self.dp[u][v]
if __name__ == "__main__":
UpperCamelCase_ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
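# Hedged note + check (not from the source): the constructor never zeroes dp[i][i],
# so self-distances stay math.inf, while paths between distinct nodes still relax
# correctly through intermediates, e.g. 0 -> 1 -> 2 beats the direct 0 -> 2 edge.
_g = Graph(3)
_g.add_edge(0, 1, 1)
_g.add_edge(1, 2, 2)
_g.add_edge(0, 2, 9)
_g.floyd_warshall()
assert _g.show_min(0, 2) == 3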
| 28 |
'''simple docstring'''
def get_highest_set_bit_position(number: int):
    """simple docstring"""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
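# Hedged sanity checks (not from the source): the result is the 1-indexed position of
# the most significant set bit, i.e. int.bit_length().
assert get_highest_set_bit_position(8) == 4
assert all(get_highest_set_bit_position(n) == n.bit_length() for n in range(1, 64))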
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
UpperCamelCase_ = datasets.utils.logging.get_logger(__name__)
UpperCamelCase_ = ["names", "prefix"]
UpperCamelCase_ = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
UpperCamelCase_ = ["encoding_errors", "on_bad_lines"]
UpperCamelCase_ = ["date_format"]
@dataclass
class _a ( datasets.BuilderConfig ):
'''simple docstring'''
A : str = ","
A : Optional[str] = None
A : Optional[Union[int, List[int], str]] = "infer"
A : Optional[List[str]] = None
A : Optional[List[str]] = None
A : Optional[Union[int, str, List[int], List[str]]] = None
A : Optional[Union[List[int], List[str]]] = None
A : Optional[str] = None
A : bool = True
A : Optional[Literal["c", "python", "pyarrow"]] = None
A : Dict[Union[int, str], Callable[[Any], Any]] = None
A : Optional[list] = None
A : Optional[list] = None
A : bool = False
A : Optional[Union[int, List[int]]] = None
A : Optional[int] = None
A : Optional[Union[str, List[str]]] = None
A : bool = True
A : bool = True
A : bool = False
A : bool = True
A : Optional[str] = None
A : str = "."
A : Optional[str] = None
A : str = '"'
A : int = 0
A : Optional[str] = None
A : Optional[str] = None
A : Optional[str] = None
A : Optional[str] = None
A : bool = True
A : bool = True
A : int = 0
A : bool = True
A : bool = False
A : Optional[str] = None
A : int = 10_000
A : Optional[datasets.Features] = None
A : Optional[str] = "strict"
A : Literal["error", "warn", "skip"] = "error"
A : Optional[str] = None
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.delimiter is not None:
SCREAMING_SNAKE_CASE : List[str] = self.delimiter
if self.column_names is not None:
SCREAMING_SNAKE_CASE : Tuple = self.column_names
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), A ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _a ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
A : Tuple = CsvConfig
def UpperCamelCase_ ( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A, (str, list, tuple) ):
SCREAMING_SNAKE_CASE : List[Any] = data_files
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Dict = [files]
SCREAMING_SNAKE_CASE : int = [dl_manager.iter_files(A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files} )]
SCREAMING_SNAKE_CASE : Optional[int] = []
for split_name, files in data_files.items():
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : List[Any] = [files]
SCREAMING_SNAKE_CASE : List[str] = [dl_manager.iter_files(A ) for file in files]
splits.append(datasets.SplitGenerator(name=A, gen_kwargs={'files': files} ) )
return splits
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if self.config.features is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.config.features.arrow_schema
if all(not require_storage_cast(A ) for feature in self.config.features.values() ):
# cheaper cast
SCREAMING_SNAKE_CASE : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=A )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE : Union[str, Any] = table_cast(A, A )
return pa_table
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
SCREAMING_SNAKE_CASE : List[Any] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(A ) else object
for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(A ) ):
SCREAMING_SNAKE_CASE : Any = pd.read_csv(A, iterator=A, dtype=A, **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(A ):
SCREAMING_SNAKE_CASE : Dict = pa.Table.from_pandas(A )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(A )}: {e}" )
raise
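# Hedged standalone sketch (not from the source) of the core read loop above: pandas
# reads the CSV in chunks and each chunk becomes a pyarrow Table, which the builder
# yields as Arrow batches. `pd` and `pa` are already imported at the top of this file.
def _demo_csv_chunks():
    import io

    reader = pd.read_csv(io.StringIO('a,b\n1,x\n2,y\n3,z\n'), iterator=True, chunksize=2)
    for batch_idx, df in enumerate(reader):
        table = pa.Table.from_pandas(df)
        print(batch_idx, table.num_rows)  # -> 0 2, then 1 1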
| 28 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if tokenize_kwargs is None:
SCREAMING_SNAKE_CASE : Optional[int] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
SCREAMING_SNAKE_CASE : Tuple = truncation
SCREAMING_SNAKE_CASE : int = tokenize_kwargs
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Optional[int] = return_tensors
return preprocess_params, {}, postprocess_params
def UpperCamelCase_ ( self, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.framework
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(A, return_tensors=A, **A )
return model_inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model(**A )
return model_outputs
def UpperCamelCase_ ( self, A, A=False ):
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self, *A, **A ):
'''simple docstring'''
return super().__call__(*A, **A )
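# Hedged usage sketch (model name assumed; weights download on first use): the class
# above backs the "feature-extraction" pipeline task and returns one embedding vector
# per token as nested Python lists.
def _demo_feature_extraction():
    from transformers import pipeline

    extractor = pipeline('feature-extraction', model='distilbert-base-uncased')
    vectors = extractor('Hello world')
    print(len(vectors[0]), len(vectors[0][0]))  # token count, hidden size (768)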
| 28 | 1 |
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """simple docstring"""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    """simple docstring"""
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
UpperCamelCase_ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
UpperCamelCase_ = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
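# Hedged sanity check for the midpoint helper above (no turtle window required).
assert get_mid((0, 0), (4, 2)) == (2.0, 1.0)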
| 28 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    '''simple docstring'''

    def __init__(self, data):
        '''simple docstring'''
        self.data = data
        self.right = None
        self.left = None


def build_tree():
    """simple docstring"""
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError('Queue emptied before tree construction finished')


def pre_order(node: TreeNode):
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode):
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)


def post_order(node: TreeNode):
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')


def level_order(node: TreeNode):
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode):
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode):
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode):
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right


def post_order_iter(node: TreeNode):
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka: list[TreeNode] = []
    stackb: list[TreeNode] = []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data, end=',')


def prompt(s: str = "", width=50, char="*") -> str:
    """simple docstring"""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
UpperCamelCase_ = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 28 | 1 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
UpperCamelCase_ = logging.get_logger(__name__)
def lowercase__( __UpperCamelCase: bool ,__UpperCamelCase: bool ):
"""simple docstring"""
def run_func(__UpperCamelCase: Dict ):
@wraps(__UpperCamelCase )
def run_in_eager_mode(*__UpperCamelCase: List[str] ,**__UpperCamelCase: Union[str, Any] ):
return func(*__UpperCamelCase ,**__UpperCamelCase )
@wraps(__UpperCamelCase )
@tf.function(experimental_compile=__UpperCamelCase )
def run_in_graph_mode(*__UpperCamelCase: str ,**__UpperCamelCase: Union[str, Any] ):
return func(*__UpperCamelCase ,**__UpperCamelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = random.Random()
SCREAMING_SNAKE_CASE : Optional[Any] = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__UpperCamelCase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : TensorFlowBenchmarkArguments
A : PretrainedConfig
A : str = "TensorFlow"
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_inference_func(A, A, A )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_train_func(A, A, A )
return self._measure_speed(_train )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], A )
SCREAMING_SNAKE_CASE : List[Any] = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
SCREAMING_SNAKE_CASE : Dict = self._prepare_inference_func(A, A, A )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], A )
SCREAMING_SNAKE_CASE : Tuple = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_train_func(A, A, A )
return self._measure_memory(_train )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
SCREAMING_SNAKE_CASE : Optional[int] = (
hasattr(A, 'architectures' )
and isinstance(config.architectures, A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
SCREAMING_SNAKE_CASE : int = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
SCREAMING_SNAKE_CASE : int = __import__('transformers', fromlist=[model_class] )
SCREAMING_SNAKE_CASE : List[Any] = getattr(A, A )
SCREAMING_SNAKE_CASE : List[str] = model_cls(A )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
SCREAMING_SNAKE_CASE : List[str] = TF_MODEL_MAPPING[config.__class__](A )
# encoder-decoder has vocab size saved differently
SCREAMING_SNAKE_CASE : str = config.vocab_size if hasattr(A, 'vocab_size' ) else config.encoder.vocab_size
SCREAMING_SNAKE_CASE : List[Any] = random_input_ids(A, A, A )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(A, decoder_input_ids=A, training=A )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(A, training=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
SCREAMING_SNAKE_CASE : List[Any] = (
hasattr(A, 'architectures' )
and isinstance(config.architectures, A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
SCREAMING_SNAKE_CASE : str = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
SCREAMING_SNAKE_CASE : Optional[Any] = __import__('transformers', fromlist=[model_class] )
SCREAMING_SNAKE_CASE : Dict = getattr(A, A )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_cls(A )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
SCREAMING_SNAKE_CASE : int = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](A )
# encoder-decoder has vocab size saved differently
SCREAMING_SNAKE_CASE : Optional[Any] = config.vocab_size if hasattr(A, 'vocab_size' ) else config.encoder.vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = random_input_ids(A, A, A )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
SCREAMING_SNAKE_CASE : List[str] = model(A, decoder_input_ids=A, labels=A, training=A )[0]
SCREAMING_SNAKE_CASE : Optional[int] = tf.gradients(A, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(A, labels=A, training=A )[0]
SCREAMING_SNAKE_CASE : Optional[int] = tf.gradients(A, model.trainable_variables )
return gradients
SCREAMING_SNAKE_CASE : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(A, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
SCREAMING_SNAKE_CASE : Tuple = timeit.repeat(
A, repeat=self.args.repeat, number=10, )
return min(A ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
SCREAMING_SNAKE_CASE : str = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
SCREAMING_SNAKE_CASE : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
SCREAMING_SNAKE_CASE : Optional[Any] = nvml.nvmlDeviceGetMemoryInfo(A )
SCREAMING_SNAKE_CASE : List[str] = meminfo.used
SCREAMING_SNAKE_CASE : List[str] = Memory(A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
SCREAMING_SNAKE_CASE : str = None
else:
SCREAMING_SNAKE_CASE : Tuple = measure_peak_memory_cpu(A )
SCREAMING_SNAKE_CASE : Any = Memory(A ) if isinstance(A, A ) else memory_bytes
if self.args.trace_memory_line_by_line:
SCREAMING_SNAKE_CASE : Optional[Any] = stop_memory_tracing(A )
if memory is None:
SCREAMING_SNAKE_CASE : int = summary.total
else:
SCREAMING_SNAKE_CASE : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _a :
'''simple docstring'''
def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = device
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std )
SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 )
SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.resize(A )
SCREAMING_SNAKE_CASE : Any = self.center_crop(A )
SCREAMING_SNAKE_CASE : str = self.normalize(A )
return images
def __call__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A )
SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE : Optional[Any] = vqgan
else:
SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE : List[str] = clip
else:
SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = iterations
SCREAMING_SNAKE_CASE : Tuple = lr
SCREAMING_SNAKE_CASE : Tuple = log
SCREAMING_SNAKE_CASE : str = make_grid
SCREAMING_SNAKE_CASE : Dict = return_val
SCREAMING_SNAKE_CASE : Union[str, Any] = quantize
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
if output_path is None:
SCREAMING_SNAKE_CASE : int = './animation.gif'
if input_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.save_path
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) )
if not len(A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(A ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A )
SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A )
if extend_frames:
SCREAMING_SNAKE_CASE : List[str] = 1.5
SCREAMING_SNAKE_CASE : int = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(A ) )
imageio.mimsave(A, A, duration=A )
print(F"gif saved to {output_path}" )
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A )
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A )
return z
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent
return self.vqgan.decode(A )
def UpperCamelCase_ ( self, A, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A )
SCREAMING_SNAKE_CASE : str = self.clip(**A )
SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) )
if neg_prompts:
SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] )
else:
SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device )
SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A )
return loss
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A )
SCREAMING_SNAKE_CASE : Dict = loop_post_process(A )
SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A )
print('CLIP loss', A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
wandb.init(reinit=A, project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE : Tuple = Image.open(A )
SCREAMING_SNAKE_CASE : int = image.resize((256, 256) )
wandb.log('Original Image', wandb.Image(A ) )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not prompts:
return []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(A, (tuple, list) ):
SCREAMING_SNAKE_CASE : List[str] = prompt[0]
SCREAMING_SNAKE_CASE : Any = float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' )
SCREAMING_SNAKE_CASE : Any = float(A )
else:
SCREAMING_SNAKE_CASE : Dict = prompt
SCREAMING_SNAKE_CASE : List[Any] = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A, device=self.device ),
}
def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ):
'''simple docstring'''
if image_path:
SCREAMING_SNAKE_CASE : int = self._get_latent(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(A, A, A )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp()
os.makedirs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(A ) )
SCREAMING_SNAKE_CASE : int = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
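# Hedged standalone sketch (not from the source) of the prompt parsing performed in
# `process_prompts` above: "text:weight" pairs separated by "|" become parallel lists
# of prompt strings and float weights (weight defaults to 1.0).
def _demo_parse_prompts(prompts: str):
    texts, weights = [], []
    for prompt in (p.strip() for p in prompts.split('|')):
        if ":" in prompt:
            text, weight = prompt.split(':')
            texts.append(text)
            weights.append(float(weight))
        else:
            texts.append(prompt)
            weights.append(1.0)
    return texts, weights


assert _demo_parse_prompts('a smiling face:2|blurry:0.5') == (
    ['a smiling face', 'blurry'], [2.0, 0.5]
)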
| 28 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A=13, A=7, A=True, A=True, A=True, A=True, A=99, A=32, A=5, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=False, A=True, A="None", A=3, A=4, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = seq_length
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Optional[int] = use_token_type_ids
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : List[str] = num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : List[str] = position_biased_input
SCREAMING_SNAKE_CASE : Dict = pos_att_type
SCREAMING_SNAKE_CASE : List[Any] = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
SCREAMING_SNAKE_CASE : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size], self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_config()
SCREAMING_SNAKE_CASE : Tuple = 300
return config
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ), [] )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = DebertaModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(A, attention_mask=A, token_type_ids=A )[0]
SCREAMING_SNAKE_CASE : Optional[int] = model(A, token_type_ids=A )[0]
SCREAMING_SNAKE_CASE : List[Any] = model(A )[0]
self.parent.assertListEqual(list(sequence_output.size() ), [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = DebertaForMaskedLM(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(A, attention_mask=A, token_type_ids=A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = DebertaForSequenceClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A, attention_mask=A, token_type_ids=A, labels=A )
self.parent.assertListEqual(list(result.logits.size() ), [self.batch_size, self.num_labels] )
self.check_loss_output(A )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DebertaForTokenClassification(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(A, attention_mask=A, token_type_ids=A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DebertaForQuestionAnswering(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(
A, attention_mask=A, token_type_ids=A, start_positions=A, end_positions=A, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A : List[Any] = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
A : Optional[int] = True
A : int = False
A : Union[str, Any] = False
A : Optional[Any] = False
A : Optional[int] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = DebertaModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self, config_class=A, hidden_size=37 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : str = DebertaModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = DebertaModel.from_pretrained('microsoft/deberta-base' )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(A, attention_mask=A )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], A, atol=1E-4 ), F"{output[:, 1:4, 1:4]}" )
| 28 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A )
def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet(
A, A, A, A, A, A, A, A, A, A, A, )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A, A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, )
idx += 1
SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}"
@classmethod
def UpperCamelCase_ ( cls, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path
while os.path.isdir(A ):
SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A )
controlnets.append(A )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}"
logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." )
if len(A ) == 0:
raise ValueError(
F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(A )
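# Illustrative layout (a sketch; the directory names below are hypothetical and
# only mirror the convention described in the comments above):
#
#   mydirectory/
#       controlnet/      <- loaded first
#       controlnet_1/    <- loaded second
#       controlnet_2/    <- loaded third, and so on
#
# so `from_pretrained("mydirectory/controlnet")` gathers all three, and
# `save_pretrained("mydirectory/controlnet")` writes the same layout back.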
| 28 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowercase__( __UpperCamelCase: str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = analyze_text(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
SCREAMING_SNAKE_CASE : Optional[Any] = sum(single_char_strings.values() )
# one length string
SCREAMING_SNAKE_CASE : Optional[Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : Dict = my_str / all_sum
my_fir_sum += prob * math.loga(__UpperCamelCase ) # entropy formula.
# print entropy
print(f"{round(-1 * my_fir_sum ):.1f}" )
# two len string
SCREAMING_SNAKE_CASE : Tuple = sum(two_char_strings.values() )
SCREAMING_SNAKE_CASE : int = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            SCREAMING_SNAKE_CASE : Optional[int] = cha + chb
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
SCREAMING_SNAKE_CASE : Optional[int] = int(__UpperCamelCase ) / all_sum
my_sec_sum += prob * math.loga(__UpperCamelCase )
# print second entropy
print(f"{round(-1 * my_sec_sum ):.1f}" )
# print the difference between them
print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def lowercase__( __UpperCamelCase: str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(__UpperCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowercase__( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
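# A minimal, self-contained restatement of the quantity printed first above
# (a sketch, independent of this file's helpers): the unigram Shannon entropy
#   H1 = -sum_c p(c) * log2 p(c)
# while the second print is the bigram entropy H2, and H2 - H1 approximates
# the conditional entropy of a character given its predecessor.
from collections import Counter
from math import log2
def unigram_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values() )
    return -sum((n / total) * log2(n / total ) for n in counts.values() )
assert round(unigram_entropy('aabb' ), 3 ) == 1.0 # two equiprobable symbols -> 1 bit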
| 28 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : str = ['''audio_values''', '''audio_mask''']
def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ):
'''simple docstring'''
super().__init__(
feature_size=A, sampling_rate=A, padding_value=A, **A, )
SCREAMING_SNAKE_CASE : str = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1]
SCREAMING_SNAKE_CASE : Dict = n_fft
SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate
SCREAMING_SNAKE_CASE : str = sampling_rate
SCREAMING_SNAKE_CASE : int = padding_value
SCREAMING_SNAKE_CASE : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = spectrogram(
A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1]
SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0
SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A, np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa )
elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
SCREAMING_SNAKE_CASE : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
SCREAMING_SNAKE_CASE : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
SCREAMING_SNAKE_CASE : List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa )
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value
for i in range(len(A ) ):
SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = feature
# return as BatchFeature
if return_attention_mask:
SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features}
SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A )
return encoded_inputs
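# A small numeric check of the dB normalisation in `_np_extract_fbank_features`
# above (a sketch, not part of the extractor): after the -20 dB shift,
# `np.clip(x / 40, -2, 0) + 1` maps the 80 dB dynamic range linearly onto [-1, 1].
_x = np.array([-80.0, -40.0, 0.0] ) # shifted log-mel values in dB
assert np.allclose(np.clip(_x / 40.0, -2.0, 0.0 ) + 1.0, [-1.0, 0.0, 1.0] )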
| 28 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Tuple = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = ['''input_ids''', '''attention_mask''']
A : List[Any] = RobertaTokenizer
def __init__( self, A=None, A=None, A=None, A="replace", A="<s>", A="</s>", A="</s>", A="<s>", A="<unk>", A="<pad>", A="<mask>", A=False, A=True, **A, ):
'''simple docstring'''
super().__init__(
A, A, tokenizer_file=A, errors=A, bos_token=A, eos_token=A, sep_token=A, cls_token=A, unk_token=A, pad_token=A, mask_token=A, add_prefix_space=A, trim_offsets=A, **A, )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', A ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Any = getattr(A, pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Tuple = pre_tok_class(**A )
SCREAMING_SNAKE_CASE : List[Any] = add_prefix_space
SCREAMING_SNAKE_CASE : str = 'post_processor'
SCREAMING_SNAKE_CASE : str = getattr(self.backend_tokenizer, A, A )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : str = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(state['cls'] )
SCREAMING_SNAKE_CASE : List[str] = False
if state.get('add_prefix_space', A ) != add_prefix_space:
SCREAMING_SNAKE_CASE : int = add_prefix_space
SCREAMING_SNAKE_CASE : Any = True
if state.get('trim_offsets', A ) != trim_offsets:
SCREAMING_SNAKE_CASE : Dict = trim_offsets
SCREAMING_SNAKE_CASE : int = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : str = getattr(A, state.pop('type' ) )
SCREAMING_SNAKE_CASE : Optional[int] = component_class(**A )
setattr(self.backend_tokenizer, A, A )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = AddedToken(A, lstrip=A, rstrip=A ) if isinstance(A, A ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def UpperCamelCase_ ( self, *A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = kwargs.get('is_split_into_words', A )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A, **A )
def UpperCamelCase_ ( self, *A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = kwargs.get('is_split_into_words', A )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A, **A )
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self._tokenizer.model.save(A, name=A )
return tuple(A )
    def UpperCamelCase_ ( self, A, A=None ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def UpperCamelCase_ ( self, A, A = None ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = [self.sep_token_id]
        SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
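# Sketch of the layouts the two methods above produce (hypothetical ids with
# bos=0, eos=2, and token lists a=[10, 11], b=[20]):
#   build_inputs_with_special_tokens(a)    -> [0, 10, 11, 2]           # <s> A </s>
#   build_inputs_with_special_tokens(a, b) -> [0, 10, 11, 2, 2, 20, 2] # <s> A </s></s> B </s>
# and the token-type method returns all zeros either way, since RoBERTa does
# not use segment (token type) ids.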
| 28 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = 9, 14 # noqa: F841
SCREAMING_SNAKE_CASE : Optional[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost] )
        adjacency[nodeb].append([nodea, cost] )
    SCREAMING_SNAKE_CASE : Dict = mst(adjacency )
SCREAMING_SNAKE_CASE : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
SCREAMING_SNAKE_CASE : Any = tuple(answer[:2] )
SCREAMING_SNAKE_CASE : List[Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
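    # Extra sanity check one could add (a sketch): the eight expected MST edges
    # above weigh 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37 in total, the known
    # minimum total weight for this classic 9-node example graph.
    assert sum(weight for *_, weight in expected ) == 37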
| 28 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''note_seq''']
def __init__( self, *A, **A ):
'''simple docstring'''
requires_backends(self, ['note_seq'] )
@classmethod
def UpperCamelCase_ ( cls, *A, **A ):
'''simple docstring'''
requires_backends(cls, ['note_seq'] )
@classmethod
def UpperCamelCase_ ( cls, *A, **A ):
'''simple docstring'''
requires_backends(cls, ['note_seq'] )
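# Pattern note (a sketch): this placeholder is what gets exported when the
# optional `note_seq` dependency is not installed -- instantiating it or
# calling either classmethod routes through `requires_backends`, which raises
# an ImportError with installation instructions instead of an opaque
# NameError at import time.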
| 28 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : int = StableDiffusionDiffEditPipeline
A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A : str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : Union[str, Any] = frozenset([] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears'
SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents
SCREAMING_SNAKE_CASE : str = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
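# In outline, the integration tests above exercise the three-stage DiffEdit
# flow (a sketch of the calls as used above, not additional API):
#   mask    = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt )
#   latents = pipe.invert(prompt=src, image=img, inpaint_strength=0.7 ).latents
#   edited  = pipe(prompt=tgt, mask_image=mask, image_latents=latents,
#                  negative_prompt=src, inpaint_strength=0.7 ).images[0]
# i.e. a semantic mask is inferred from the prompt pair, the source image is
# inverted to noise latents, and the masked region is re-synthesised toward
# the target prompt.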
| 28 | 1 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: list[int] ,__UpperCamelCase: str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = int(__UpperCamelCase )
# Initialize Result
SCREAMING_SNAKE_CASE : Optional[int] = []
# Traverse through all denomination
for denomination in reversed(__UpperCamelCase ):
# Find denominations
while int(__UpperCamelCase ) >= int(__UpperCamelCase ):
total_value -= int(__UpperCamelCase )
answer.append(__UpperCamelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
UpperCamelCase_ = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
UpperCamelCase_ = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
UpperCamelCase_ = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(F"""Following is minimal change for {value}: """)
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
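# Worked example (a sketch): with the default Indian denominations above,
# greedily changing 987 proceeds
#   987 -> 500 + 100 + 100 + 100 + 100 + 50 + 20 + 10 + 5 + 2
# so find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]. Greedy change is
# optimal for canonical coin systems such as this one, but not for
# arbitrary denomination sets.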
| 28 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int = 1_00_00_00 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,__UpperCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
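# What the sieve above computes (a sketch): phi[i] is Euler's totient, built
# by a standard sieve (phi[p] == p - 1 flags p as prime, after which each
# prime divides phi out of its multiples). The returned value,
#   sum(phi[d] for d in 2..limit),
# counts the reduced proper fractions n/d with d <= limit, which matches
# Project Euler problem 72 for limit = 1_000_000.
# Tiny check: for limit = 8, phi sums to 1+2+2+4+2+6+4 = 21 fractions.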
| 28 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: Tuple=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Optional[int]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
for old_item in old_list:
SCREAMING_SNAKE_CASE : List[Any] = old_item.replace('in_layers.0' ,'norm1' )
SCREAMING_SNAKE_CASE : Optional[Any] = new_item.replace('in_layers.2' ,'conv1' )
SCREAMING_SNAKE_CASE : Dict = new_item.replace('out_layers.0' ,'norm2' )
SCREAMING_SNAKE_CASE : int = new_item.replace('out_layers.3' ,'conv2' )
SCREAMING_SNAKE_CASE : Tuple = new_item.replace('emb_layers.1' ,'time_emb_proj' )
SCREAMING_SNAKE_CASE : Optional[Any] = new_item.replace('skip_connection' ,'conv_shortcut' )
SCREAMING_SNAKE_CASE : List[Any] = shave_segments(__UpperCamelCase ,n_shave_prefix_segments=__UpperCamelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: List[str]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for old_item in old_list:
SCREAMING_SNAKE_CASE : str = old_item
SCREAMING_SNAKE_CASE : Optional[int] = new_item.replace('norm.weight' ,'group_norm.weight' )
SCREAMING_SNAKE_CASE : Dict = new_item.replace('norm.bias' ,'group_norm.bias' )
SCREAMING_SNAKE_CASE : Dict = new_item.replace('proj_out.weight' ,'proj_attn.weight' )
SCREAMING_SNAKE_CASE : List[str] = new_item.replace('proj_out.bias' ,'proj_attn.bias' )
SCREAMING_SNAKE_CASE : List[Any] = shave_segments(__UpperCamelCase ,n_shave_prefix_segments=__UpperCamelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Dict=None ,__UpperCamelCase: Union[str, Any]=None ,__UpperCamelCase: Any=None ):
"""simple docstring"""
assert isinstance(__UpperCamelCase ,__UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
SCREAMING_SNAKE_CASE : Optional[int] = old_checkpoint[path]
SCREAMING_SNAKE_CASE : str = old_tensor.shape[0] // 3
SCREAMING_SNAKE_CASE : List[str] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
SCREAMING_SNAKE_CASE : Any = old_tensor.shape[0] // config['num_head_channels'] // 3
SCREAMING_SNAKE_CASE : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = old_tensor.split(channels // num_heads ,dim=1 )
SCREAMING_SNAKE_CASE : Tuple = query.reshape(__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = key.reshape(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = value.reshape(__UpperCamelCase )
for path in paths:
SCREAMING_SNAKE_CASE : List[str] = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
SCREAMING_SNAKE_CASE : str = new_path.replace('middle_block.0' ,'mid_block.resnets.0' )
SCREAMING_SNAKE_CASE : int = new_path.replace('middle_block.1' ,'mid_block.attentions.0' )
SCREAMING_SNAKE_CASE : Any = new_path.replace('middle_block.2' ,'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
SCREAMING_SNAKE_CASE : Union[str, Any] = new_path.replace(replacement['old'] ,replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = old_checkpoint[path['old']][:, :, 0]
else:
SCREAMING_SNAKE_CASE : Tuple = old_checkpoint[path['old']]
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : int = checkpoint['time_embed.0.weight']
SCREAMING_SNAKE_CASE : Dict = checkpoint['time_embed.0.bias']
SCREAMING_SNAKE_CASE : List[str] = checkpoint['time_embed.2.weight']
SCREAMING_SNAKE_CASE : List[str] = checkpoint['time_embed.2.bias']
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['input_blocks.0.0.weight']
SCREAMING_SNAKE_CASE : Dict = checkpoint['input_blocks.0.0.bias']
SCREAMING_SNAKE_CASE : Dict = checkpoint['out.0.weight']
SCREAMING_SNAKE_CASE : str = checkpoint['out.0.bias']
SCREAMING_SNAKE_CASE : str = checkpoint['out.2.weight']
SCREAMING_SNAKE_CASE : Dict = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
SCREAMING_SNAKE_CASE : List[str] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
SCREAMING_SNAKE_CASE : Any = {
layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
SCREAMING_SNAKE_CASE : Tuple = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
SCREAMING_SNAKE_CASE : str = {
layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
SCREAMING_SNAKE_CASE : Any = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
SCREAMING_SNAKE_CASE : int = {
layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Tuple = (i - 1) // (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : List[str] = (i - 1) % (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : Any = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
SCREAMING_SNAKE_CASE : Optional[int] = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
if f"input_blocks.{i}.0.op.weight" in checkpoint:
SCREAMING_SNAKE_CASE : List[str] = checkpoint[
f"input_blocks.{i}.0.op.weight"
]
SCREAMING_SNAKE_CASE : Dict = checkpoint[
f"input_blocks.{i}.0.op.bias"
]
continue
SCREAMING_SNAKE_CASE : int = renew_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = {'old': f"input_blocks.{i}.0", 'new': f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
SCREAMING_SNAKE_CASE : str = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path, resnet_op] ,config=__UpperCamelCase )
if len(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = renew_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = {
'old': f"input_blocks.{i}.1",
'new': f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
SCREAMING_SNAKE_CASE : str = {
f"input_blocks.{i}.1.qkv.bias": {
'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"input_blocks.{i}.1.qkv.weight": {
'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,attention_paths_to_split=__UpperCamelCase ,config=__UpperCamelCase ,)
SCREAMING_SNAKE_CASE : List[str] = middle_blocks[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = middle_blocks[1]
SCREAMING_SNAKE_CASE : List[Any] = middle_blocks[2]
SCREAMING_SNAKE_CASE : Any = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,config=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,config=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Tuple = renew_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : str = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,attention_paths_to_split=__UpperCamelCase ,config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = i // (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : str = i % (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : Dict = [shave_segments(__UpperCamelCase ,2 ) for name in output_blocks[i]]
SCREAMING_SNAKE_CASE : str = {}
for layer in output_block_layers:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = layer.split('.' )[0], shave_segments(__UpperCamelCase ,1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = [layer_name]
if len(__UpperCamelCase ) > 1:
SCREAMING_SNAKE_CASE : Optional[Any] = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
SCREAMING_SNAKE_CASE : Optional[Any] = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
SCREAMING_SNAKE_CASE : Dict = renew_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = renew_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = {'old': f"output_blocks.{i}.0", 'new': f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
SCREAMING_SNAKE_CASE : Optional[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[
f"output_blocks.{i}.{index}.conv.weight"
]
SCREAMING_SNAKE_CASE : List[str] = checkpoint[
f"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
SCREAMING_SNAKE_CASE : List[str] = []
if len(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : str = {
'old': f"output_blocks.{i}.1",
'new': f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
SCREAMING_SNAKE_CASE : int = {
f"output_blocks.{i}.1.qkv.bias": {
'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"output_blocks.{i}.1.qkv.weight": {
'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None ,config=__UpperCamelCase ,)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_resnet_paths(__UpperCamelCase ,n_shave_prefix_segments=1 )
for path in resnet_0_paths:
SCREAMING_SNAKE_CASE : Dict = '.'.join(['output_blocks', str(__UpperCamelCase ), path['old']] )
SCREAMING_SNAKE_CASE : Optional[int] = '.'.join(['up_blocks', str(__UpperCamelCase ), 'resnets', str(__UpperCamelCase ), path['new']] )
SCREAMING_SNAKE_CASE : int = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCamelCase_ = json.loads(f.read())
UpperCamelCase_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
UpperCamelCase_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
UpperCamelCase_ = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase_ = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
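# Block-index arithmetic used in the conversion above (a sketch, assuming
# num_res_blocks = 2 purely for illustration): input block i maps to
#   block_id          = (i - 1) // (num_res_blocks + 1)
#   layer_in_block_id = (i - 1) %  (num_res_blocks + 1)
# so i = 1, 2 land in down_blocks.0 as resnets 0 and 1, while i = 3 (the
# downsampler slot, when present) is caught early by the
# `input_blocks.{i}.0.op.weight` branch before any resnet renaming happens.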
| 28 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = LongformerTokenizer
A : List[str] = True
A : Optional[int] = LongformerTokenizerFast
A : Tuple = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
| 28 | 1 |
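The fixture above writes a toy byte-level BPE vocabulary and merges file to disk and then loads the tokenizer from them. The same setup works outside the test harness; a minimal sketch (the `vocab.json`/`merges.txt` file names and the tiny vocabulary are illustrative, mirroring the fixture rather than the real Longformer assets):

import json
import os
import tempfile

from transformers import LongformerTokenizer

vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
    "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.json")   # assumed file name
merges_file = os.path.join(tmpdir, "merges.txt")  # assumed file name

with open(vocab_file, "w", encoding="utf-8") as fp:
    json.dump(dict(zip(vocab, range(len(vocab)))), fp)
with open(merges_file, "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))

tokenizer = LongformerTokenizer(vocab_file, merges_file, unk_token="<unk>")
print(tokenizer.tokenize("lower newer"))
# per the assertions above: ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']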
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: bool = True ,__UpperCamelCase: float = math.inf ,__UpperCamelCase: float = -math.inf ,__UpperCamelCase: float = math.inf ,__UpperCamelCase: float = -math.inf ,__UpperCamelCase: bool = False ,__UpperCamelCase: float = 1_00 ,__UpperCamelCase: float = 0.0_1 ,__UpperCamelCase: float = 1 ,):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Any = search_prob
SCREAMING_SNAKE_CASE : Any = start_temperate
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Tuple = None
while not search_end:
SCREAMING_SNAKE_CASE : str = current_state.score()
if best_state is None or current_score > best_state.score():
SCREAMING_SNAKE_CASE : Any = current_state
scores.append(__UpperCamelCase )
iterations += 1
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : List[str] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
SCREAMING_SNAKE_CASE : Optional[int] = random.randint(0 ,len(__UpperCamelCase ) - 1 ) # picking a random neighbor
SCREAMING_SNAKE_CASE : Union[str, Any] = neighbors.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
SCREAMING_SNAKE_CASE : str = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
SCREAMING_SNAKE_CASE : str = picked_neighbor
else:
SCREAMING_SNAKE_CASE : List[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
SCREAMING_SNAKE_CASE : List[Any] = picked_neighbor
SCREAMING_SNAKE_CASE : Any = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
SCREAMING_SNAKE_CASE : Tuple = True
else:
SCREAMING_SNAKE_CASE : Dict = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__UpperCamelCase ) ,__UpperCamelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Any ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
UpperCamelCase_ = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
UpperCamelCase_ = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
UpperCamelCase_ = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
UpperCamelCase_ = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
UpperCamelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCamelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F"""{local_min.score()}"""
)
UpperCamelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCamelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F"""{local_min.score()}"""
)
| 28 |
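The loop above carries bounds handling, visualization, and a pluggable SearchProblem; the core acceptance rule is easier to see in isolation. A self-contained sketch on a one-dimensional function, using the same ingredients (accept any improvement, accept a worse move with probability e^(change/T), geometric cooling):

import math
import random

def anneal_1d(f, x0, step=0.5, start_temp=100.0, rate_of_decrease=0.01, threshold_temp=0.01):
    """Minimize f over the reals with simulated annealing."""
    x = best = x0
    temp = start_temp
    while temp >= threshold_temp:
        candidate = x + random.uniform(-step, step)  # random neighbor
        change = f(x) - f(candidate)                 # positive means improvement
        if change > 0 or random.random() < math.e ** (change / temp):
            x = candidate                            # accept the move
        if f(x) < f(best):
            best = x
        temp -= temp * rate_of_decrease              # geometric cooling
    return best

random.seed(0)
print(f"approx. minimizer of (x - 3)^2: {anneal_1d(lambda x: (x - 3) ** 2, x0=12.0):.2f}")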
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionXLImgaImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : str = image / 2 + 0.5
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A )
SCREAMING_SNAKE_CASE : str = pipe(**A ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 28 | 1 |
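Outside the dummy-component harness, the same pipeline is normally loaded from a pretrained checkpoint. A hedged usage sketch (the snippet's mangled `StableDiffusionXLImgaImgPipeline` corresponds to diffusers' `StableDiffusionXLImg2ImgPipeline`; the checkpoint id and image URL below are illustrative, and the call mirrors the dummy-input dict above):

import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",  # illustrative checkpoint id
    torch_dtype=torch.float16,
).to("cuda")

init_image = load_image("https://example.com/init.png")  # placeholder URL

result = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=init_image,
    strength=0.75,              # how far to move away from the init image
    guidance_scale=5.0,
    num_inference_steps=30,
    generator=torch.Generator("cuda").manual_seed(0),
)
result.images[0].save("out.png")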
'''simple docstring'''
import operator as op
UpperCamelCase_ = "scaler.pt"
UpperCamelCase_ = "pytorch_model"
UpperCamelCase_ = "random_states"
UpperCamelCase_ = "optimizer"
UpperCamelCase_ = "scheduler"
UpperCamelCase_ = "pytorch_model.bin"
UpperCamelCase_ = "pytorch_model.bin.index.json"
UpperCamelCase_ = "model.safetensors"
UpperCamelCase_ = "model.safetensors.index.json"
UpperCamelCase_ = "1.10.2"
UpperCamelCase_ = "py38"
UpperCamelCase_ = "4.17.0"
UpperCamelCase_ = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
UpperCamelCase_ = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
UpperCamelCase_ = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
UpperCamelCase_ = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
UpperCamelCase_ = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
UpperCamelCase_ = "2.0.1"
UpperCamelCase_ = ["pdsh", "standard", "openmpi", "mvapich"]
UpperCamelCase_ = ["default", "reduce-overhead", "max-autotune"]
UpperCamelCase_ = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCamelCase_ = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
UpperCamelCase_ = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
UpperCamelCase_ = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 28 |
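The operator table near the end maps comparison strings onto functions from the operator module so that version requirements can be evaluated dynamically. A minimal sketch of how such a table is typically consumed (version parsing via the packaging library; the requirement strings are illustrative):

import operator as op
from packaging.version import parse

STR_OP_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

def compare_versions(current: str, operation: str, target: str) -> bool:
    """Evaluate e.g. compare_versions('2.1.0', '>=', '2.0.1')."""
    return STR_OP_TO_FUNC[operation](parse(current), parse(target))

print(compare_versions("2.1.0", ">=", "2.0.1"))  # True
print(compare_versions("1.9.0", ">=", "2.0.1"))  # False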
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Dict = '''char'''
A : Any = '''bpe'''
A : Dict = '''wp'''
UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''image_processor''', '''char_tokenizer''']
A : int = '''ViTImageProcessor'''
A : List[str] = '''MgpstrTokenizer'''
def __init__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', A, )
SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(A, A )
def __call__( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A )
if text is not None:
SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE : Any = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences
SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' )
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(A ):
SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = final_strs
SCREAMING_SNAKE_CASE : Any = final_scores
SCREAMING_SNAKE_CASE : Dict = char_strs
SCREAMING_SNAKE_CASE : Any = bpe_strs
SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs
return out
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
SCREAMING_SNAKE_CASE : List[Any] = self.char_decode
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : str = '[s]'
elif format == DecodeType.BPE:
SCREAMING_SNAKE_CASE : str = self.bpe_decode
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = '#'
elif format == DecodeType.WORDPIECE:
SCREAMING_SNAKE_CASE : Any = self.wp_decode
SCREAMING_SNAKE_CASE : Tuple = 102
SCREAMING_SNAKE_CASE : List[Any] = '[SEP]'
else:
raise ValueError(F"Format {format} is not supported." )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], []
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 )
SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A )
SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:]
SCREAMING_SNAKE_CASE : List[Any] = decoder(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 )
SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:]
for index in range(A ):
SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A )
SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos]
SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1
SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(A )
conf_scores.append(A )
return dec_strs, conf_scores
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )]
return decode_strs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )]
return decode_strs
| 28 | 1 |
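The batch decode above runs three decoders (character, BPE, wordpiece) per sample and keeps whichever candidate string carries the highest cumulative confidence. The selection step in isolation (pure Python; the candidate strings and scores are made up):

def pick_best(candidates):
    """candidates: list of (decoded_string, confidence) pairs from different decoders."""
    scores = [score for _, score in candidates]
    best_index = scores.index(max(scores))  # same max_score_index logic as above
    return candidates[best_index]

sample = [("ticket", 0.91), ("ticke#", 0.40), ("ticket", 0.88)]  # char / bpe / wp
print(pick_best(sample))  # ('ticket', 0.91)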
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase_ = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
UpperCamelCase_ = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
UpperCamelCase_ = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ), codebase_urls=['https://www.atticusprojectai.org/cuad'], reference_urls=['https://www.atticusprojectai.org/cuad'], )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
SCREAMING_SNAKE_CASE : List[Any] = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
SCREAMING_SNAKE_CASE : Dict = evaluate(dataset=A, predictions=A )
return score
| 28 |
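Calling the metric follows the example embedded in its docstring. A runnable variant (`datasets.load_metric` is the older entry point shown there; recent releases of the ecosystem expose the same metric through the separate evaluate library, so treat the loader call as version-dependent):

import datasets

shared_id = "example-contract__Parties"  # ids only need to match across the two lists

predictions = [{"id": shared_id, "prediction_text": ["The seller:"]}]
references = [{"id": shared_id, "answers": {"answer_start": [143], "text": ["The seller:"]}}]

cuad_metric = datasets.load_metric("cuad")
results = cuad_metric.compute(predictions=predictions, references=references)
print(results)  # exact_match, f1, aupr, prec_at_80_recall, prec_at_90_recall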
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ):
"""simple docstring"""
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float()
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 28 | 1 |
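The weight_g/weight_v checkpoint keys handled above come from PyTorch weight normalization, which reparameterizes a layer's weight as a magnitude times a direction; applying it before loading and removing it afterwards folds the values back into a plain weight tensor. A small demonstration of the mechanism, independent of SpeechT5 (note that on recent PyTorch releases this classic helper is deprecated in favor of torch.nn.utils.parametrizations.weight_norm):

from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(nn.Conv1d(4, 8, kernel_size=3))  # state dict now holds weight_g / weight_v
print(sorted(conv.state_dict().keys()))  # ['bias', 'weight_g', 'weight_v']

# A checkpoint storing weight_g / weight_v can only be loaded while the
# parametrization is active; afterwards it is folded back into 'weight':
remove_weight_norm(conv)
print(sorted(conv.state_dict().keys()))  # ['bias', 'weight']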
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
UpperCamelCase_ = TypeVar("T")
class _a ( Generic[T] ):
'''simple docstring'''
def __init__( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any | T = None
SCREAMING_SNAKE_CASE : int = len(A )
SCREAMING_SNAKE_CASE : list[T] = [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE : int = fnc
self.build()
def UpperCamelCase_ ( self ):
'''simple docstring'''
for p in range(self.N - 1, 0, -1 ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.fn(self.st[p * 2], self.st[p * 2 + 1] )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
p += self.N
SCREAMING_SNAKE_CASE : str = v
while p > 1:
SCREAMING_SNAKE_CASE : Union[str, Any] = p // 2
SCREAMING_SNAKE_CASE : List[str] = self.fn(self.st[p * 2], self.st[p * 2 + 1] )
def UpperCamelCase_ ( self, A, A ): # noqa: E741
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = l + self.N, r + self.N
SCREAMING_SNAKE_CASE : T | None = None
while l <= r:
if l % 2 == 1:
SCREAMING_SNAKE_CASE : Any = self.st[l] if res is None else self.fn(A, self.st[l] )
if r % 2 == 0:
SCREAMING_SNAKE_CASE : Optional[Any] = self.st[r] if res is None else self.fn(A, self.st[r] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
UpperCamelCase_ = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
UpperCamelCase_ = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
UpperCamelCase_ = SegmentTree(test_array, min)
UpperCamelCase_ = SegmentTree(test_array, max)
UpperCamelCase_ = SegmentTree(test_array, lambda a, b: a + b)
def lowercase__( ):
"""simple docstring"""
for i in range(len(__UpperCamelCase ) ):
for j in range(__UpperCamelCase ,len(__UpperCamelCase ) ):
SCREAMING_SNAKE_CASE : Dict = reduce(__UpperCamelCase ,test_array[i : j + 1] )
SCREAMING_SNAKE_CASE : Dict = reduce(__UpperCamelCase ,test_array[i : j + 1] )
SCREAMING_SNAKE_CASE : Dict = reduce(lambda __UpperCamelCase ,__UpperCamelCase : a + b ,test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__UpperCamelCase ,__UpperCamelCase )
assert max_range == max_segment_tree.query(__UpperCamelCase ,__UpperCamelCase )
assert sum_range == sum_segment_tree.query(__UpperCamelCase ,__UpperCamelCase )
test_all_segments()
for index, value in test_updates.items():
UpperCamelCase_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 28 |
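The tree stores the N leaves at indices N..2N-1 of the flat array and each parent at p = child // 2, which is why build() walks p from N - 1 down to 1 and query() climbs with the parity checks on l and r. The layout traced by hand for a four-element array under min:

# Rebuilding the flat 1-indexed layout for arr = [1, 10, -2, 9]:
arr = [1, 10, -2, 9]
N = len(arr)
st = [None] * N + arr            # leaves occupy st[4:8]
for p in range(N - 1, 0, -1):    # parents filled bottom-up, as in build()
    st[p] = min(st[2 * p], st[2 * p + 1])
print(st)  # [None, -2, 1, -2, 1, 10, -2, 9] -> st[1] is the global minimum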
'''simple docstring'''
from typing import Any
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = data
SCREAMING_SNAKE_CASE : Any = None
def __repr__( self ):
'''simple docstring'''
return F"Node({self.data})"
class _a :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = None
def __iter__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
while node:
yield node.data
SCREAMING_SNAKE_CASE : List[str] = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(A ) for item in self] )
def __getitem__( self, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self, A, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
for _ in range(A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = current.next
SCREAMING_SNAKE_CASE : Any = data
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(len(self ), A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(0, A )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A )
if self.head is None:
SCREAMING_SNAKE_CASE : Optional[int] = new_node
elif index == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head
SCREAMING_SNAKE_CASE : Tuple = new_node
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : str = temp.next
SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next
SCREAMING_SNAKE_CASE : List[str] = new_node
def UpperCamelCase_ ( self ): # print every node data
'''simple docstring'''
print(self )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def UpperCamelCase_ ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase_ ( self, A = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node
if index == 0:
SCREAMING_SNAKE_CASE : List[str] = self.head.next
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : Any = temp.next
SCREAMING_SNAKE_CASE : List[str] = temp.next
SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next
return delete_node.data
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.head is None
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Any = self.head
while current:
# Store the current node's next node.
SCREAMING_SNAKE_CASE : Optional[int] = current.next
# Make the current node's next point backwards
SCREAMING_SNAKE_CASE : int = prev
# Make the previous node be the current node
SCREAMING_SNAKE_CASE : int = current
# Make the current node the next node (to progress iteration)
SCREAMING_SNAKE_CASE : List[Any] = next_node
# Return prev in order to put the head at the end
SCREAMING_SNAKE_CASE : List[Any] = prev
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__UpperCamelCase ) == i
linked_list.insert_nth(__UpperCamelCase ,i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__UpperCamelCase ) == 9
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
for i in range(0 ,9 ):
SCREAMING_SNAKE_CASE : Any = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
SCREAMING_SNAKE_CASE : Optional[int] = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
SCREAMING_SNAKE_CASE : str = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCamelCase )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowercase__( ):
"""simple docstring"""
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE : Dict = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(__UpperCamelCase )
print('\nReading/changing Node data using indexing:' )
print(f"Element at Position 1: {linked_list[1]}" )
SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip()
print('New list:' )
print(__UpperCamelCase )
print(f"length of linked_list is : {len(__UpperCamelCase )}" )
if __name__ == "__main__":
main()
| 28 | 1 |
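The reverse() method above is the classic three-pointer walk. Traced on plain dict nodes it reads almost the same, which makes the pointer rotation easy to follow:

def reverse(head):
    """Reverse a singly linked list of {'data': ..., 'next': ...} dicts."""
    prev, current = None, head
    while current:
        next_node = current["next"]  # store the rest of the list
        current["next"] = prev       # point the current node backwards
        prev = current               # previous node becomes the current one
        current = next_node          # step forward
    return prev                      # the old tail is the new head

head = {"data": 1, "next": {"data": 2, "next": {"data": 3, "next": None}}}
node = reverse(head)
while node:
    print(node["data"], end=" ")  # 3 2 1
    node = node["next"]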
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Tuple = min_resolution
SCREAMING_SNAKE_CASE : int = max_resolution
SCREAMING_SNAKE_CASE : Tuple = do_resize
SCREAMING_SNAKE_CASE : Tuple = size
SCREAMING_SNAKE_CASE : Any = do_normalize
SCREAMING_SNAKE_CASE : Optional[int] = image_mean
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std
SCREAMING_SNAKE_CASE : Optional[int] = do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor
SCREAMING_SNAKE_CASE : List[str] = do_pad
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self, A, A=False ):
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0]
if isinstance(A, Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0]
SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1]
return expected_height, expected_width
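# Worked example of the resize rule above with size = {'shortest_edge': 18}:
# a 600x400 (h x w) input has w < h, so the short side w maps to 18 and
# h scales to int(18 * 600 / 400) = 27, preserving the aspect ratio.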
@require_torch
@require_vision
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A, 'image_mean' ) )
self.assertTrue(hasattr(A, 'image_std' ) )
self.assertTrue(hasattr(A, 'do_normalize' ) )
self.assertTrue(hasattr(A, 'do_resize' ) )
self.assertTrue(hasattr(A, 'size' ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad, A )
SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A, Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A )
SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A )
for image in image_inputs:
self.assertIsInstance(A, np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : int = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[int] = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
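# For reference, a minimal COCO-style detection target of the kind these tests read from
# coco_annotations.txt looks roughly like the sketch below; the field values are illustrative
# assumptions, not taken from the actual fixture file.
target = {
    "image_id": 39769,
    "annotations": [
        {
            "id": 1,
            "image_id": 39769,
            "category_id": 17,
            "bbox": [100.0, 50.0, 200.0, 150.0],  # [x, y, width, height] in pixels
            "area": 30000.0,
            "iscrowd": 0,
        }
    ],
}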
| 28 | 1 |
'''simple docstring'''
def solution(limit: int = 1_00_00_00) -> int:
    """simple docstring"""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            # i is prime: apply the totient update to every multiple of i
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
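# As a sanity check, the sieve above agrees with a brute-force totient count on small
# limits. The naive_phi helper below is a sketch added for illustration and is not part
# of the original file.
from math import gcd


def naive_phi(n: int) -> int:
    # count integers in [1, n) that are coprime to n
    return sum(1 for k in range(1, n) if gcd(k, n) == 1)


assert solution(8) == sum(naive_phi(n) for n in range(2, 9))  # both give 21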
| 28 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.')
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.')
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
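# A minimal usage sketch, assuming the public `datasets` API; the toy rows are illustrative.
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})

# alternate examples until the first dataset is exhausted (the default strategy)
print(interleave_datasets([d1, d2])["a"])  # [0, 10, 1, 11, 2, 12]

# stack the rows of both datasets along axis 0
print(concatenate_datasets([d1, d2])["a"])  # [0, 1, 2, 10, 11, 12]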
| 28 | 1 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase_ = False
class _a ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.dual_guided(
prompt='first prompt', image=A, text_to_image_strength=0.75, generator=A, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[str] = VersatileDiffusionPipeline.from_pretrained(A, torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Dict = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.dual_guided(
prompt='first prompt', image=A, text_to_image_strength=0.75, generator=A, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = 'cyberpunk 2077'
SCREAMING_SNAKE_CASE : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.dual_guided(
prompt=A, image=A, text_to_image_strength=0.75, generator=A, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
SCREAMING_SNAKE_CASE : Any = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = 'A painting of a squirrel eating a burger '
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.text_to_image(
prompt=A, generator=A, guidance_scale=7.5, num_inference_steps=50, output_type='numpy' ).images
SCREAMING_SNAKE_CASE : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : List[str] = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : List[str] = pipe.image_variation(A, generator=A, output_type='numpy' ).images
SCREAMING_SNAKE_CASE : Any = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 28 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) )
class _a :
'''simple docstring'''
def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : int = last_hidden_size
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size
SCREAMING_SNAKE_CASE : Optional[Any] = output_stride
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
SCREAMING_SNAKE_CASE : int = model(A, labels=A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
A : List[Any] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A : Optional[int] = False
A : Dict = False
A : List[Any] = False
A : Optional[int] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = 5
self.assertEqual(len(A ), A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : int = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(A, A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[str] = model.to(A )
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, A )
| 28 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__magic_name__ :Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowerCAmelCase , cache_dir=__lowerCAmelCase )
__magic_name__ :Dict = [t[-1] for t in os.walk(os.path.join(__lowerCAmelCase , os.listdir(__lowerCAmelCase )[0] , '''snapshots''' ) )]
__magic_name__ :Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowerCAmelCase )
__magic_name__ :Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :int = jax.random.PRNGKey(0 )
__magic_name__ :Any = 4
__magic_name__ :str = jax.device_count()
__magic_name__ :List[str] = num_samples * [prompt]
__magic_name__ :str = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
__magic_name__ :List[Any] = replicate(__lowerCAmelCase )
__magic_name__ :int = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = shard(__lowerCAmelCase )
__magic_name__ :Any = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
__magic_name__ :List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowerCAmelCase ) == num_samples
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :str = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__lowerCAmelCase )
__magic_name__ :Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :List[str] = jax.random.PRNGKey(0 )
__magic_name__ :str = 5_0
__magic_name__ :Tuple = jax.device_count()
__magic_name__ :Any = num_samples * [prompt]
__magic_name__ :Dict = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
__magic_name__ :List[Any] = replicate(__lowerCAmelCase )
__magic_name__ :List[str] = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Union[str, Any] = shard(__lowerCAmelCase )
__magic_name__ :Optional[int] = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase )
__magic_name__ :List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :Tuple = jax.random.PRNGKey(0 )
__magic_name__ :str = 5_0
__magic_name__ :Optional[Any] = jax.device_count()
__magic_name__ :List[Any] = num_samples * [prompt]
__magic_name__ :List[Any] = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
__magic_name__ :Dict = replicate(__lowerCAmelCase )
__magic_name__ :Tuple = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = shard(__lowerCAmelCase )
__magic_name__ :List[str] = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
__magic_name__ :Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :Any = jax.random.PRNGKey(0 )
__magic_name__ :Any = 5_0
__magic_name__ :Optional[Any] = jax.device_count()
__magic_name__ :Optional[Any] = num_samples * [prompt]
__magic_name__ :Optional[Any] = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
__magic_name__ :Tuple = replicate(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = shard(__lowerCAmelCase )
__magic_name__ :Tuple = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
__magic_name__ , __magic_name__ :Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase , )
__magic_name__ :Any = scheduler.create_state()
__magic_name__ :Optional[int] = scheduler_state
__magic_name__ :int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :Tuple = jax.random.PRNGKey(0 )
__magic_name__ :Any = 5_0
__magic_name__ :Dict = jax.device_count()
__magic_name__ :List[Any] = num_samples * [prompt]
__magic_name__ :Optional[int] = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
__magic_name__ :List[str] = replicate(__lowerCAmelCase )
__magic_name__ :Tuple = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = shard(__lowerCAmelCase )
__magic_name__ :Tuple = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :str = jax.device_count()
__magic_name__ :str = num_samples * [prompt]
__magic_name__ :Dict = jax.random.split(jax.random.PRNGKey(0 ) , __lowerCAmelCase )
__magic_name__ , __magic_name__ :Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase , )
__magic_name__ :Any = replicate(__lowerCAmelCase )
__magic_name__ :Optional[int] = pipeline.prepare_inputs(__lowerCAmelCase )
__magic_name__ :Any = shard(__lowerCAmelCase )
__magic_name__ :int = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__magic_name__ :List[Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
__magic_name__ , __magic_name__ :Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase , use_memory_efficient_attention=__lowerCAmelCase , )
__magic_name__ :List[Any] = replicate(__lowerCAmelCase )
__magic_name__ :Any = pipeline.prepare_inputs(__lowerCAmelCase )
__magic_name__ :Any = shard(__lowerCAmelCase )
__magic_name__ :Any = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__magic_name__ :List[Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
UpperCamelCase_ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
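# A short usage sketch of the class above, assuming the standard Hub checkpoint name.
from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
encoding = tokenizer("hello world")
# build_inputs_with_special_tokens wraps the sequence as [CLS] ... [SEP]
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))
# ['[CLS]', 'hello', 'world', '[SEP]']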
| 28 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __lowerCamelCase (_a ):
def __init__( self: Optional[int],A_: Dict,A_: List[str]=13,A_: List[str]=7,A_: Dict=True,A_: Any=True,A_: Optional[Any]=False,A_: Any=True,A_: List[Any]=99,A_: Optional[Any]=32,A_: Union[str, Any]=5,A_: str=4,A_: Optional[int]=64,A_: Tuple="gelu",A_: Optional[int]=0.1,A_: str=0.1,A_: Union[str, Any]=512,A_: Optional[Any]=16,A_: Any=2,A_: Dict=0.0_2,A_: Dict=3,A_: List[Any]=4,A_: Optional[Any]=None,A_: Tuple=2,A_: Optional[int]=2,A_: int=2,A_: List[Any]=2,A_: Optional[Any]=4,A_: Any=1,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = scope
__UpperCamelCase = q_groups
__UpperCamelCase = k_groups
__UpperCamelCase = v_groups
__UpperCamelCase = post_attention_groups
__UpperCamelCase = intermediate_groups
__UpperCamelCase = output_groups
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size],self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size],self.num_choices )
__UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def snake_case_ ( self: List[str],A_: str,A_: str,A_: str,A_: List[Any],A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = SqueezeBertModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase = model(A_,A_ )
__UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self: int,A_: Optional[Any],A_: List[Any],A_: List[str],A_: List[str],A_: Optional[Any],A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = SqueezeBertForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase = model(A_,attention_mask=A_,labels=A_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self: str,A_: str,A_: Union[str, Any],A_: Tuple,A_: Any,A_: List[str],A_: Dict ):
'''simple docstring'''
__UpperCamelCase = SqueezeBertForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase = model(
A_,attention_mask=A_,start_positions=A_,end_positions=A_ )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def snake_case_ ( self: Optional[int],A_: Optional[int],A_: List[str],A_: int,A_: Optional[int],A_: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = SqueezeBertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase = model(A_,attention_mask=A_,labels=A_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def snake_case_ ( self: str,A_: Any,A_: str,A_: Optional[Any],A_: Optional[Any],A_: Optional[Any],A_: Dict ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = SqueezeBertForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase = model(A_,attention_mask=A_,labels=A_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self: Union[str, Any],A_: Tuple,A_: str,A_: Any,A_: Union[str, Any],A_: Dict,A_: str ):
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = SqueezeBertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
__UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
__UpperCamelCase = model(
A_,attention_mask=A_,labels=A_,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
((__UpperCamelCase), (__UpperCamelCase), (__UpperCamelCase), (__UpperCamelCase), (__UpperCamelCase), (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_lowercase = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase = False
_lowercase = True
_lowercase = False
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = SqueezeBertModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,dim=37 )
def snake_case_ ( self: int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*A_ )
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*A_ )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*A_ )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*A_ )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*A_ )
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*A_ )
@slow
def snake_case_ ( self: int ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = SqueezeBertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
__UpperCamelCase = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
__UpperCamelCase = model(A_ )[0]
__UpperCamelCase = torch.Size((1, 3) )
self.assertEqual(output.shape,A_ )
__UpperCamelCase = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(A_,A_,atol=1E-4 ) )
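# A minimal inference sketch along the lines of the integration test above; the checkpoint
# name comes from the test, while the tokenization step is an addition for completeness.
import torch
from transformers import AutoTokenizer, SqueezeBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

inputs = tokenizer("A soccer game.", "Some people are playing a sport.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 3): one score per MNLI class
print(logits.argmax(-1))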
| 1 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase_ = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock()
SCREAMING_SNAKE_CASE : List[Any] = 500
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Any = HTTPError
SCREAMING_SNAKE_CASE : Any = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=A ) as mock_head:
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )
self.assertIsNotNone(A )
@is_staging_test
class _a ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(
F"{USER}/test-dynamic-image-processor", trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
| 28 | 0 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """simple docstring"""

    pass


class Node:
    """simple docstring"""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
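# The __iter__ check above stores every visited node, which costs O(n) extra memory.
# Floyd's two-pointer cycle detection is a constant-memory alternative; the sketch below
# is an addition, reusing the Node class defined above.
def has_loop_floyd(head: Node | None) -> bool:
    # advance one pointer by one node and the other by two;
    # the pointers can only meet again if the list contains a cycle
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False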
| 2 |
'''simple docstring'''
class Node:
    '''simple docstring'''

    def __init__(self, val):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """simple docstring"""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """simple docstring"""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
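# Note that insert silently discards duplicate values (an equal key falls through to the
# final else branch and merely rewrites self.val), so this variant deduplicates while
# sorting; it also misbehaves when the root value is falsy such as 0, because the guard
# tests truthiness rather than None. For example:
print(tree_sort([4, 2, 4, 1, 2]))  # [1, 2, 4] -- duplicates are dropped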
| 28 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
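# Illustrative example (not part of the original module):
#   get_pairs(("h", "e", "y</w>"))  ->  {("h", "e"), ("e", "y</w>")}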
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into BPE tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
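# Usage sketch (assumes network access to the published checkpoint):
#   tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   tokenizer.tokenize("sam is a boy.")  # e.g. ["sam", "is", "a", "boy", "."]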
| 3 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]

    return values
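# Usage sketch (argument names are illustrative):
#   value = deprecate("old_kwarg", "1.0.0", "Use `new_kwarg` instead.", take_from=kwargs)
# pops `old_kwarg` from `kwargs`, emits a FutureWarning, and returns the popped value.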
| 28 | 0 |
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    # Returns the 1-indexed position of the highest set bit, i.e. number.bit_length().
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
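# Illustrative values: get_highest_set_bit_position(0) == 0,
# get_highest_set_bit_position(1) == 1, get_highest_set_bit_position(8) == 4.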
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI checkpoint into the Hugging Face Blenderbot structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
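# Example invocation (file paths are illustrative):
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json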
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 6 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Feature extraction pipeline: runs the model and returns the raw hidden states for each token."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (e.g. last_hidden_state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
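# Usage sketch (model name is illustrative):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-cased")
#   features = extractor("This is a test")  # nested list: [batch, tokens, hidden_size]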
| 28 | 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by tracing one forward pass on `x`."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
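# Sanity-check sketch (illustrative, not part of the original script): ModuleTransfer can be
# exercised on two identically shaped leaf modules before running a full conversion:
#   src, dest = nn.Conv2d(3, 8, 3), nn.Conv2d(3, 8, 3)
#   ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 32, 32))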
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 7 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    # prints each level of the tree on its own line
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 28 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the Hugging Face CLIP processor so gradients can flow through the image
    preprocessing step (the stock processor converts to PIL, which breaks backprop).
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Assemble the intermediate .png frames saved during generation into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector transform to the base latent and decode the resulting image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Transform an image toward the positive prompts and away from the negative ones."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        else:
            save_path = save_path + "_" + get_timestamp()
            os.makedirs(save_path)
        self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 28 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """Streamer that prints generated text to stdout as soon as entire words are formed."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):
            return True
        return False
class TextIteratorStreamer(TextStreamer):
    """Streamer that stores finalized text in a queue, usable as an iterator from another thread."""

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
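# Usage sketch (mirrors the documented pattern; checkpoint name is illustrative):
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#   model.generate(**inputs, streamer=TextStreamer(tok), max_new_tokens=20)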
| 9 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""Wrapper that runs several `ControlNetModel`s and merges their residuals (Multi-ControlNet)."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least"
                f" {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
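# Usage sketch (assumes two already-loaded ControlNetModel instances):
#   multi = MultiControlNetModel([controlnet_canny, controlnet_depth])
#   down, mid = multi(sample, timestep, text_embeds, [cond_a, cond_b], [1.0, 0.5])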
| 28 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
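# Usage sketch:
#   config = DetaConfig()   # default ResNet backbone, 900 queries
#   config.hidden_size      # 256, aliased to `d_model` via `attribute_map`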
| 10 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform):
        """Compute a clipped, dB-scaled log-mel spectrogram for one waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
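# Usage sketch (synthetic one-second waveform):
#   extractor = TvltFeatureExtractor()
#   waveform = np.random.randn(44100).astype(np.float32)
#   batch = extractor(waveform, sampling_rate=44100, return_tensors="np")
#   batch["audio_values"].shape  # (1, 1, time, 128)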
| 28 | 0 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = JukeboxTokenizer
    metas = {
        'artist': 'Zac Brown Band',
        'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def a__ (self ) -> Dict:
"""simple docstring"""
import torch
_a = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
_a = tokenizer(**self.metas )['''input_ids''']
# fmt: off
_a = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def a__ (self ) -> Optional[int]:
"""simple docstring"""
import torch
_a = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
_a = tokenizer(**self.metas )['''input_ids''']
# fmt: off
_a = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 11 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = 9, 14 # noqa: F841
SCREAMING_SNAKE_CASE : Optional[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(__UpperCamelCase )
for nodea, nodea, cost in edges:
adjacency[nodea].append([nodea, cost] )
adjacency[nodea].append([nodea, cost] )
SCREAMING_SNAKE_CASE : Dict = mst(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
SCREAMING_SNAKE_CASE : Any = tuple(answer[:2] )
SCREAMING_SNAKE_CASE : List[Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
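The test above imports `prisms_algorithm` (the source package's own spelling) and only checks that every expected MST edge, in either orientation, appears in the result. Below is a minimal heap-based Prim's sketch for illustration; it is not the imported implementation, whose exact output format may differ.

import heapq
from collections import defaultdict

def prim_mst(adjacency, start=0):
    # Grow the tree from `start`, always taking the cheapest edge that
    # reaches a new vertex. `adjacency` maps node -> list of (neighbor, weight).
    visited = {start}
    heap = [(w, start, v) for v, w in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap:
        w, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        mst_edges.append([u, v, w])
        for nxt, nw in adjacency[v]:
            if nxt not in visited:
                heapq.heappush(heap, (nw, v, nxt))
    return mst_edges

edges = [[0, 1, 4], [0, 7, 8], [1, 7, 11], [7, 6, 1]]
adjacency = defaultdict(list)
for u, v, w in edges:
    adjacency[u].append((v, w))
    adjacency[v].append((u, w))
print(prim_mst(adjacency))  # [[0, 1, 4], [0, 7, 8], [7, 6, 1]]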
| 28 | 0 |
import os
lowerCamelCase__ : int = {"""I""": 1, """V""": 5, """X""": 1_0, """L""": 5_0, """C""": 1_0_0, """D""": 5_0_0, """M""": 1_0_0_0}
def UpperCamelCase ( lowercase_ ) -> int:
'''simple docstring'''
lowercase__ : Optional[Any] = 0
lowercase__ : List[str] = 0
while index < len(lowercase_ ) - 1:
lowercase__ : str = SYMBOLS[numerals[index]]
lowercase__ : str = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : List[Any] = """"""
lowercase__ : List[Any] = num // 10_00
numerals += m_count * "M"
num %= 10_00
lowercase__ : List[Any] = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
lowercase__ : Optional[Any] = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCamelCase ( lowercase_ = "/p089_roman.txt" ) -> int:
'''simple docstring'''
lowercase__ : Optional[int] = 0
with open(os.path.dirname(lowercase_ ) + roman_numerals_filename ) as filea:
lowercase__ : int = filea.readlines()
for line in lines:
lowercase__ : Optional[int] = line.strip()
lowercase__ : Dict = parse_roman_numerals(lowercase_ )
lowercase__ : int = generate_roman_numerals(lowercase_ )
savings += len(lowercase_ ) - len(lowercase_ )
return savings
if __name__ == "__main__":
print(f'''{solution() = }''')
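The module above is Project Euler 89: parse each Roman numeral subtractively, regenerate it in minimal form, and count the characters saved. A compact re-statement of both directions, with a worked check (`MCCCCCCVI` → `MDCVI` appears to be the example from the problem statement; the function names here are illustrative):

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def parse_roman(numerals: str) -> int:
    # Subtractive parsing: a symbol smaller than its successor is subtracted.
    total = 0
    for cur, nxt in zip(numerals, numerals[1:] + "I"):
        total += SYMBOLS[cur] if SYMBOLS[cur] >= SYMBOLS[nxt] else -SYMBOLS[cur]
    return total

def to_roman(num: int) -> str:
    # Greedy by value, including the six subtractive pairs, yields minimal form.
    pairs = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
             (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"),
             (5, "V"), (4, "IV"), (1, "I")]
    out = []
    for value, symbol in pairs:
        count, num = divmod(num, value)
        out.append(symbol * count)
    return "".join(out)

assert parse_roman("MCCCCCCVI") == 1606 and to_roman(1606) == "MDCVI"  # saves 4 chars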
| 12 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : int = StableDiffusionDiffEditPipeline
A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A : str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : Union[str, Any] = frozenset([] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears'
SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents
SCREAMING_SNAKE_CASE : str = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
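Both slow tests above exercise the same three-stage DiffEdit recipe. The sketch below condenses that call sequence into a standalone script; it mirrors the tests' model id, prompts, and parameters, and needs a GPU plus the stabilityai/stable-diffusion-2-1 weights to actually run.

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))
generator = torch.manual_seed(0)

# 1) Contrast source and target prompts to estimate an edit mask.
mask = pipe.generate_mask(image=image, source_prompt="a bowl of fruit",
                          target_prompt="a bowl of pears", generator=generator)
# 2) DDIM-invert the image to latents so unmasked regions are preserved.
latents = pipe.invert(prompt="a bowl of fruit", image=image,
                      inpaint_strength=0.7, generator=generator).latents
# 3) Denoise toward the target prompt inside the mask only.
edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents,
              inpaint_strength=0.7, generator=generator).images[0]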
| 28 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
__lowerCamelCase : Any = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
__lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__lowerCamelCase : Tuple = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
__lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : List[str] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCamelCase : List[str] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self ) -> Any:
__lowerCamelCase : List[str] = self.get_tokenizer()
__lowerCamelCase : Optional[int] = self.get_image_processor()
__lowerCamelCase : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase : Tuple = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : Optional[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase : Any = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__lowerCamelCase : Optional[int] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
__lowerCamelCase : List[str] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Optional[int] = self.get_image_processor()
__lowerCamelCase : List[str] = self.get_tokenizer()
__lowerCamelCase : int = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = self.prepare_image_inputs()
__lowerCamelCase : Optional[int] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__lowerCamelCase : Union[str, Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Dict = self.get_image_processor()
__lowerCamelCase : str = self.get_tokenizer()
__lowerCamelCase : Any = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = 'lower newer'
__lowerCamelCase : Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self ) -> str:
__lowerCamelCase : List[Any] = self.get_image_processor()
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase : int = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = 'lower newer'
__lowerCamelCase : List[str] = self.prepare_image_inputs()
__lowerCamelCase : Any = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
processor()
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Tuple = self.get_image_processor()
__lowerCamelCase : int = self.get_tokenizer()
__lowerCamelCase : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCamelCase : int = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : List[str] = self.get_image_processor()
__lowerCamelCase : List[str] = self.get_tokenizer()
__lowerCamelCase : int = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = 'lower newer'
__lowerCamelCase : int = self.prepare_image_inputs()
__lowerCamelCase : Any = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 13 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int = 1_00_00_00 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,__UpperCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
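The snippet above is a totient sieve (Project Euler 72): `sum(phi[2:])` counts the reduced proper fractions n/d with 2 ≤ d ≤ limit. Whenever `phi[i]` is still `i - 1`, `i` is prime, and the sieve removes the 1/i fraction of each multiple's count. A small cross-check against a direct gcd count, written as an illustrative sketch:

from math import gcd

def totient_sum_sieve(limit: int) -> int:
    # Same sieve as above: phi[i] starts at i - 1; when i turns out prime
    # (phi[i] untouched), subtract phi[j] // i from every multiple j.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2:])

def totient_sum_naive(limit: int) -> int:
    # Quadratic reference: count coprime pairs (n, d) with n < d directly.
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)

assert totient_sum_sieve(8) == totient_sum_naive(8) == 21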
| 28 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a__ = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''PerceiverFeatureExtractor''']
a__ = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
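The import-structure/`_LazyModule` pair above is the transformers lazy-loading convention: heavy submodules are imported only when one of their exported names is first accessed, so `import transformers` stays cheap. A toy sketch of the idea, not the real `_LazyModule` implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    # Attribute access triggers the underlying import on first use.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy = LazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(9.0))  # math is imported only when sqrt is first accessed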
| 14 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = LongformerTokenizer
A : List[str] = True
A : Optional[int] = LongformerTokenizerFast
A : Tuple = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
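The offsets test above pins down how `add_prefix_space` and `trim_offsets` interact in the fast tokenizer. A hedged, runnable illustration (it downloads the allenai/longformer-base-4096 tokenizer; the spans in the comment are the expected values, matching the assertions above):

from transformers import LongformerTokenizerFast

text = "hello hello"
for trim in (True, False):
    tok = LongformerTokenizerFast.from_pretrained(
        "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=trim
    )
    enc = tok(text, return_offsets_mapping=True, add_special_tokens=False)
    # Expected: trim=True -> [(0, 5), (6, 11)] (leading space trimmed from
    # the second token's span); trim=False -> [(0, 5), (5, 11)].
    print(trim, enc.offset_mapping)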
| 28 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A : Union[str, Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
A__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
A__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
A__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
A__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def lowerCamelCase__ (self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = super().to_dict()
for k, v in d.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = v.to_dict()
return d
| 15 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionXLImgaImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : str = image / 2 + 0.5
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A )
SCREAMING_SNAKE_CASE : str = pipe(**A ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 28 | 0 |
import os
def __a ( ):
with open(os.path.dirname(A__ ) + "/p022_names.txt" ) as file:
SCREAMING_SNAKE_CASE = str(file.readlines()[0] )
SCREAMING_SNAKE_CASE = names.replace("\"" , "" ).split("," )
names.sort()
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
for i, name in enumerate(A__ ):
for letter in name:
name_score += ord(A__ ) - 64
total_score += (i + 1) * name_score
SCREAMING_SNAKE_CASE = 0
return total_score
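A worked check of the scoring rule above, using the example from the Project Euler 22 statement (see the main guard below for the full run): "COLIN" has alphabetical value 3 + 15 + 12 + 9 + 14 = 53, so at sorted position 938 it scores 938 × 53 = 49714.

assert sum(ord(c) - 64 for c in "COLIN") == 53
assert 938 * 53 == 49_714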
if __name__ == "__main__":
print(solution())
| 16 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Dict = '''char'''
A : Any = '''bpe'''
A : Dict = '''wp'''
UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''image_processor''', '''char_tokenizer''']
A : int = '''ViTImageProcessor'''
A : List[str] = '''MgpstrTokenizer'''
def __init__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', A, )
SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(A, A )
def __call__( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A )
if text is not None:
SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE : Any = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences
SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' )
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(A ):
SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = final_strs
SCREAMING_SNAKE_CASE : Any = final_scores
SCREAMING_SNAKE_CASE : Dict = char_strs
SCREAMING_SNAKE_CASE : Any = bpe_strs
SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs
return out
    def _decode_helper( self, pred_logits, format ):
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(F"Format {format} is not supported." )
        dec_strs , conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _ , preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True )
        preds_index = preds_index.view(-1, batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob , _ = torch.nn.functional.softmax(pred_logits, dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self, sequences ):
        '''simple docstring'''
        decode_strs = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self, sequences ):
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self, sequences ):
        '''simple docstring'''
        decode_strs = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
| 28 | 0 |
from collections.abc import Sequence
def max_subsequence_sum( nums : Sequence[int] | None = None ) -> int:
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""" )
    ans = nums[0]
    for i in range(1 ,len(nums ) ):
        num = nums[i]
        ans = max(num ,ans + num ,ans )
    return ans
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
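    # Quick sanity check (an illustrative sketch, not part of the original
    # script): a subsequence need not be contiguous, so the best sum here is
    # simply the total of the positive elements.
    assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12  # 1 + 4 + 2 + 1 + 4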
| 17 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def load_weights( checkpoint ,hf_model ,config ):
    """simple docstring"""
    # NOTE: the target attribute names below (conv_pre/upsampler/resblocks/
    # conv_post) are assumed to follow the HF SpeechTaHifiGan module layout.
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path ,stats_path ,pytorch_dump_folder_path ,config_path=None ,repo_id=None ,):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'] ,model ,config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    # `mean` and `scale` are assumed to be registered buffers on SpeechTaHifiGan.
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
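# Example invocation (a sketch; the script filename and file paths are
# hypothetical and must point at the original SpeechT5 HiFiGAN release):
#
#   python convert_hifigan.py \
#       --checkpoint_path ./generator.ckpt \
#       --stats_path ./stats.npy \
#       --config_path ./config.json \
#       --pytorch_dump_folder_path ./speecht5_hifigan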
| 28 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_DESCRIPTION = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_KWARGS_DESCRIPTION = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the SQuAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n    >>> squad_metric = datasets.load_metric(\"squad\")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Squad(datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict )
return score
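# Minimal usage sketch (mirrors the docstring example above; ids and answers
# are made up for illustration):
#
#   import datasets
#   squad_metric = datasets.load_metric("squad")
#   predictions = [{"id": "q1", "prediction_text": "1976"}]
#   references = [{"id": "q1", "answers": {"text": ["1976"], "answer_start": [97]}}]
#   print(squad_metric.compute(predictions=predictions, references=references))
#   # {'exact_match': 100.0, 'f1': 100.0}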
| 18 |
'''simple docstring'''
from typing import Any
class Node:
    '''simple docstring'''
    def __init__( self, data ):
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__( self ):
        '''simple docstring'''
        return F"Node({self.data})"
class LinkedList:
    '''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        self.head = None
    def __iter__( self ):
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self ):
        '''simple docstring'''
        return sum(1 for _ in self )
    def __repr__( self ):
        '''simple docstring'''
        return "->".join([str(item ) for item in self] )
    def __getitem__( self, index ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self, index, data ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self, data ):
        '''simple docstring'''
        self.insert_nth(len(self ), data )
    def insert_head( self, data ):
        '''simple docstring'''
        self.insert_nth(0, data )
    def insert_nth( self, index, data ):
        '''simple docstring'''
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ):  # print every node data
        '''simple docstring'''
        print(self )
    def delete_head( self ):
        '''simple docstring'''
        return self.delete_nth(0 )
    def delete_tail( self ):  # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self, index = 0 ):
        '''simple docstring'''
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ):
        '''simple docstring'''
        return self.head is None
    def reverse( self ):
        '''simple docstring'''
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list():
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i ,i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 ,11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 ,12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 ,10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
    for i in range(0 ,9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 ,1 ) )
def test_singly_linked_list_2():
    """simple docstring"""
    test_input = [
        -9,
        1_00,
        Node(77_34_51_12 ),
        'dlrow olleH',
        7,
        55_55,
        0,
        -1_9_2.5_5_5_5_5,
        'Hello, world!',
        7_7.9,
        Node(10 ),
        None,
        None,
        1_2.2_0,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 1_2.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    """simple docstring"""
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
    linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
    linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nDelete head' )
    linked_list.delete_head()
    print('Delete tail' )
    linked_list.delete_tail()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nReverse linked list' )
    linked_list.reverse()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nString representation of linked list:' )
    print(linked_list )
    print('\nReading/changing Node data using indexing:' )
    print(f"Element at Position 1: {linked_list[1]}" )
    linked_list[1] = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(linked_list )
    print(f"length of linked_list is : {len(linked_list )}" )
if __name__ == "__main__":
main()
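# Usage sketch (illustrative): insertion and deletion at the head are O(1),
# so the list doubles as a simple LIFO stack.
#
#   stack = LinkedList()
#   stack.insert_head(1)
#   stack.insert_head(2)
#   assert stack.delete_head() == 2  # last in, first out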
| 28 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
    if __snake_case < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(__snake_case, float ):
        raise TypeError('''Input value must be a \'int\' type''' )
return bin(__snake_case ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
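# Alternative counting strategy (a sketch, not part of the original module):
# Brian Kernighan's trick clears the lowest set bit each iteration, so it
# loops once per set bit rather than once per binary digit.
def _set_bits_kernighan(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count
assert _set_bits_kernighan(25) == 3  # 0b11001 has three set bits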
| 19 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase ):
'''simple docstring'''
    def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ):
        '''simple docstring'''
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self, image_inputs, batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values, key=lambda item : item[0] )[0]
            expected_width = max(expected_values, key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = YolosImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A, 'image_mean' ) )
self.assertTrue(hasattr(A, 'image_std' ) )
self.assertTrue(hasattr(A, 'do_normalize' ) )
self.assertTrue(hasattr(A, 'do_resize' ) )
self.assertTrue(hasattr(A, 'size' ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad, A )
SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A, Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A )
SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A )
for image in image_inputs:
self.assertIsInstance(A, np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : int = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[int] = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
| 28 | 0 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config( swin_name ):
    config = SwinConfig()
    name_split = swin_name.split('_' )
    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 2_1841
    else:
        num_classes = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key( name ):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        name = 'swin.' + name
    return name
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint( swin_name , pytorch_dump_folder_path ):
    timm_model = timm.create_model(swin_name , pretrained=True )
    timm_model.eval()
    config = get_swin_config(swin_name )
    model = SwinForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    timm_outs = timm_model(inputs['pixel_values'] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1e-3 )
    print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
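# Example invocation (a sketch; the script filename and output directory are
# hypothetical):
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224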
| 20 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase_ = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets( datasets: List[DatasetType] ,probabilities: Optional[List[float]] = None ,seed: Optional[int] = None ,info: Optional[DatasetInfo] = None ,split: Optional[NamedSplit] = None ,stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset ,(Dataset, IterableDataset) ):
            if isinstance(dataset ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset ,Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset ,dataset_type ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets ,probabilities ,seed ,info=info ,split=split ,stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets ,probabilities ,seed ,info=info ,split=split ,stopping_strategy=stopping_strategy )
def concatenate_datasets( dsets: List[DatasetType] ,info: Optional[DatasetInfo] = None ,split: Optional[NamedSplit] = None ,axis: int = 0 ,):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset ,(Dataset, IterableDataset) ):
            if isinstance(dataset ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset ,Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset ,dataset_type ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets ,info=info ,split=split ,axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets ,info=info ,split=split ,axis=axis )
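# Usage sketch (illustrative values; assumes two small in-memory datasets):
#
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"x": [0, 1, 2]})
#   d2 = Dataset.from_dict({"x": [10, 11, 12]})
#   mixed = interleave_datasets(
#       [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
#   )
#   combined = concatenate_datasets([d1, d2])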
| 28 | 0 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError ):
    pass
def gen( shards ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["""RANK"""] )
    world_size = int(os.environ["""WORLD_SIZE"""] )
    parser = ArgumentParser()
    parser.add_argument("""--streaming""" , type=bool )
    parser.add_argument("""--local_rank""" , type=int )
    parser.add_argument("""--num_workers""" , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"""shards""": [F"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
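# Launch sketch (illustrative; the script filename and process count are
# assumptions):
#
#   torchrun --nproc_per_node=2 run_torch_distributed.py --streaming True
#
# Each rank then verifies it received its expected share of the
# NUM_SHARDS * NUM_ITEMS_PER_SHARD examples.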
| 21 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester ):
    '''simple docstring'''
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config, 'num_attention_heads' ) )
class MobileViTModelTester:
'''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
SCREAMING_SNAKE_CASE : int = model(A, labels=A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': MobileViTModel,
            '''image-classification''': MobileViTForImageClassification,
            '''image-segmentation''': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = 5
self.assertEqual(len(A ), A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : int = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(A, A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[str] = model.to(A )
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, A )
| 28 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    def __init__( self , size : int ) -> None:
        """simple docstring"""
        self._graph : list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex : int ) -> Iterator[Edge]:
        """simple docstring"""
        return iter(self._graph[vertex] )
    @property
    def size( self ) -> int:
        """simple docstring"""
        return self._size
    def add_edge( self , from_vertex : int , to_vertex : int , weight : int ) -> None:
        """simple docstring"""
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex : int , finish_vertex : int ) -> int | None:
        """simple docstring"""
        queue = deque([start_vertex] )
        distances : list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
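# Usage sketch (illustrative): zero-weight edges go to the front of the deque
# and one-weight edges to the back, so vertices pop in nondecreasing distance.
#
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)
#   g.add_edge(1, 2, 1)
#   assert g.get_shortest_path(0, 2) == 1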
| 22 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" DistilBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
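# Usage sketch (illustrative):
#
#     tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     encoded = tokenizer("Hello world")
#     # `encoded` carries "input_ids" and "attention_mask", matching model_input_names.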
| 28 | 0 |
def remove_digit(num: int) -> int:
    """Return the largest number that can be made by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__("doctest").testmod()
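# Illustrative example: remove_digit(152) considers 52, 12 and 15 (each digit
# removed in turn) and returns the maximum, 52. Negative input is handled via
# abs(), so remove_digit(-152) also returns 52.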
| 23 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_file_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
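# Note: the push-to-hub tests above are gated by @is_staging_test and talk to
# the Hugging Face staging endpoint. A typical invocation (illustrative; the
# test path and environment variable are assumptions about the repo layout):
#
#     HUGGINGFACE_CO_STAGING=1 python -m pytest tests/utils/test_image_processing_utils.py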
| 28 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] )-> Union[str, Any]:
'''simple docstring'''
__snake_case = SEWConfig()
if is_finetuned:
__snake_case = model.wav_encoder.wav_model.cfg
else:
__snake_case = model.cfg
__snake_case = fs_config.conv_bias
__snake_case = eval(fs_config.conv_feature_layers )
__snake_case = [x[0] for x in conv_layers]
__snake_case = [x[1] for x in conv_layers]
__snake_case = [x[2] for x in conv_layers]
__snake_case = '''gelu'''
__snake_case = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
__snake_case = 0.0
__snake_case = fs_config.activation_fn.name
__snake_case = fs_config.encoder_embed_dim
__snake_case = 0.02
__snake_case = fs_config.encoder_ffn_embed_dim
__snake_case = 1E-5
__snake_case = fs_config.encoder_layerdrop
__snake_case = fs_config.encoder_attention_heads
__snake_case = fs_config.conv_pos_groups
__snake_case = fs_config.conv_pos
__snake_case = len(_lowerCamelCase )
__snake_case = fs_config.encoder_layers
__snake_case = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__snake_case = model.cfg
__snake_case = fs_config.final_dropout
__snake_case = fs_config.layerdrop
__snake_case = fs_config.activation_dropout
__snake_case = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__snake_case = fs_config.attention_dropout
__snake_case = fs_config.dropout_input
__snake_case = fs_config.dropout
__snake_case = fs_config.mask_channel_length
__snake_case = fs_config.mask_channel_prob
__snake_case = fs_config.mask_length
__snake_case = fs_config.mask_prob
__snake_case = '''Wav2Vec2FeatureExtractor'''
__snake_case = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=True )-> int:
'''simple docstring'''
if is_finetuned:
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__snake_case = SEWConfig.from_pretrained(_lowerCamelCase )
else:
__snake_case = convert_config(model[0] , _lowerCamelCase )
__snake_case = model[0].eval()
__snake_case = True if config.feat_extract_norm == '''layer''' else False
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
if is_finetuned:
if dict_path:
__snake_case = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case = target_dict.pad_index
__snake_case = target_dict.bos_index
__snake_case = target_dict.pad_index
__snake_case = target_dict.bos_index
__snake_case = target_dict.eos_index
__snake_case = len(target_dict.symbols )
__snake_case = os.path.join(_lowerCamelCase , '''vocab.json''' )
if not os.path.isdir(_lowerCamelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , _lowerCamelCase )
__snake_case = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_lowerCamelCase , )
__snake_case = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
__snake_case = SEWForCTC(_lowerCamelCase )
else:
__snake_case = SEWModel(_lowerCamelCase )
feature_extractor.save_pretrained(_lowerCamelCase )
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase_ : str = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
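# Example invocation (illustrative paths; the script name below is an
# assumption about how this file is saved):
#
#     python convert_sew_checkpoint.py \
#         --checkpoint_path /path/to/sew.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./sew-hf \
#         --is_finetuned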
| 24 |
'''simple docstring'''
class Node:
    """A binary search tree node used by tree sort."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """In-order traversal: left subtree, node, right subtree."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort a list by building a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
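# For reference, the call above prints the sorted list: [1, 2, 3, 9, 10, 13, 14]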
| 28 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(a , "num_attention_heads" ) )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , a : List[str] , a : List[str]=13 , a : Union[str, Any]=64 , a : List[Any]=3 , a : Dict=3 , a : int=2 , a : str=1 , a : str=16 , a : Union[str, Any]=[128, 256, 384] , a : int=[4, 6, 8] , a : List[Any]=[2, 3, 4] , a : Dict=[16, 16, 16] , a : Union[str, Any]=0 , a : Any=[2, 2, 2] , a : Tuple=[2, 2, 2] , a : Optional[Any]=0.02 , a : Optional[int]=True , a : List[str]=True , a : Dict=2 , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : List[Any] = image_size
SCREAMING_SNAKE_CASE : List[Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = kernel_size
SCREAMING_SNAKE_CASE : Optional[Any] = stride
SCREAMING_SNAKE_CASE : Optional[Any] = padding
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_sizes
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : Dict = key_dim
SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : int = attention_ratio
SCREAMING_SNAKE_CASE : int = mlp_ratio
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : Any = initializer_range
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __UpperCamelCase ( self : int , a : Any , a : Tuple , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = LevitModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a )
SCREAMING_SNAKE_CASE : List[str] = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : Optional[int] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __UpperCamelCase ( self : Any , a : List[Any] , a : Union[str, Any] , a : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Optional[int] = LevitForImageClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = LevitModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not output attentions" )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = model_class(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
def check_hidden_states_output(a : Dict , a : List[Any] , a : str ):
SCREAMING_SNAKE_CASE : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.hidden_states
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths ) + 1
self.assertEqual(len(a ) , a )
SCREAMING_SNAKE_CASE : List[Any] = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : Optional[int] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE : Any = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(a , a , a )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple , a : int , a : str , a : List[str]=False ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE : str = model_class(a )
model.to(a )
model.train()
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(a , a , return_labels=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**a ).loss
loss.backward()
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : str = True
for model_class in self.all_model_classes:
if model_class in get_values(a ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE : Any = model_class(a )
model.gradient_checkpointing_enable()
model.to(a )
model.train()
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(a , a , return_labels=a )
SCREAMING_SNAKE_CASE : Dict = model(**a ).loss
loss.backward()
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[int] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE : str = problem_type["title"]
SCREAMING_SNAKE_CASE : List[str] = problem_type["num_labels"]
SCREAMING_SNAKE_CASE : str = model_class(a )
model.to(a )
model.train()
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(a , a , return_labels=a )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a ) as warning_list:
SCREAMING_SNAKE_CASE : List[Any] = model(**a ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Dict = LevitModel.from_pretrained(a )
self.assertIsNotNone(a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
a )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([1.0448, -0.3745, -1.8317] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) | 25 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Warn about deprecated arguments or attributes and return their values, if any."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
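# Usage sketch (illustrative; assumes a caller inside the diffusers package
# holding a `kwargs` dict):
#
#     # Emits a FutureWarning and returns the value of the deprecated kwarg.
#     scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)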
| 28 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
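# Usage sketch (illustrative):
#
#     config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
#     onnx_config = BloomOnnxConfig(config, task="default", use_past=True)
#     # `inputs` lists input_ids, the per-layer past key/values, and attention_mask.
#     print(list(onnx_config.inputs))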
| 26 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase:
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=2 , snake_case_=True , snake_case_=False , snake_case_=10 , snake_case_=3 , snake_case_=32 * 8 , snake_case_=32 * 8 , snake_case_=4 , snake_case_=64 , ):
_A = parent
_A = batch_size
_A = is_training
_A = use_auxiliary_loss
_A = num_queries
_A = num_channels
_A = min_size
_A = max_size
_A = num_labels
_A = hidden_dim
_A = hidden_dim
def lowerCAmelCase__ ( self ):
_A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case_ )
_A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
_A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
).float()
_A = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
_A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self ):
_A = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_A = self.num_queries
_A = self.num_labels
_A = [1, 1, 1, 1]
_A = self.num_channels
_A = 64
_A = 128
_A = self.hidden_dim
_A = self.hidden_dim
_A = self.hidden_dim
return config
def lowerCAmelCase__ ( self ):
_A, _A, _A, _A, _A = self.prepare_config_and_inputs()
_A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
_A = output.encoder_hidden_states
_A = output.pixel_decoder_hidden_states
_A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , config.decoder_layers )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=False ):
with torch.no_grad():
_A = MaskaFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
_A = model(snake_case_ , output_hidden_states=snake_case_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = MaskaFormerForUniversalSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
def comm_check_on_output(snake_case_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_A = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
_A = model(snake_case_ )
comm_check_on_output(snake_case_ )
_A = model(
pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
comm_check_on_output(snake_case_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
__magic_name__ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__magic_name__ = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def lowerCAmelCase__ ( self ):
_A = MaskaFormerModelTester(self )
_A = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowerCAmelCase__ ( self ):
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowerCAmelCase__ ( self ):
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowerCAmelCase__ ( self ):
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowerCAmelCase__ ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase__ ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(snake_case_ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def lowerCAmelCase__ ( self ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_A = MaskaFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase__ ( self ):
_A = (self.model_tester.min_size,) * 2
_A = {
'pixel_values': torch.randn((2, 3, *size) , device=snake_case_ ),
'mask_labels': torch.randn((2, 10, *size) , device=snake_case_ ),
'class_labels': torch.zeros(2 , 10 , device=snake_case_ ).long(),
}
_A = self.model_tester.get_config()
_A = MaskaFormerForUniversalSegmentation(snake_case_ ).to(snake_case_ )
_A = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(snake_case_ ).to(snake_case_ )
_A = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self ):
if not self.model_tester.is_training:
return
_A = self.all_model_classes[1]
_A, _A, _A, _A, _A = self.model_tester.prepare_config_and_inputs()
_A = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_A = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def lowerCAmelCase__ ( self ):
_A = self.all_model_classes[1]
_A, _A, _A, _A, _A = self.model_tester.prepare_config_and_inputs()
_A = True
_A = True
_A = model_class(snake_case_ ).to(snake_case_ )
model.train()
_A = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
_A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self ):
_A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(snake_case_ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(snake_case_ , return_tensors='pt' ).to(snake_case_ )
_A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 384, 384) )
with torch.no_grad():
_A = model(**snake_case_ )
_A = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_A = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_A = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowerCAmelCase__ ( self ):
_A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case_ ).eval()
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(snake_case_ , return_tensors='pt' ).to(snake_case_ )
_A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 384, 384) )
with torch.no_grad():
_A = model(**snake_case_ )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_A = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_A = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowerCAmelCase__ ( self ):
_A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case_ ).eval()
_A = self.default_image_processor
_A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
_A = inputs['pixel_values'].to(snake_case_ )
_A = [el.to(snake_case_ ) for el in inputs['mask_labels']]
_A = [el.to(snake_case_ ) for el in inputs['class_labels']]
with torch.no_grad():
_A = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
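    # A hedged sketch (not part of the original tests) of turning the raw head
    # outputs above into a semantic segmentation map; the 384x384 target size is
    # illustrative:
    #
    # processor = MaskaFormerImageProcessor.from_pretrained(
    #     "facebook/mask2former-swin-small-coco-instance"
    # )
    # semantic_map = processor.post_process_semantic_segmentation(
    #     outputs, target_sizes=[(384, 384)]
    # )[0]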
| 27 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE : int = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
lowerCamelCase_ = f"{src_lang}-{tgt_lang}"
lowerCamelCase_ = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
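# The model cards above note that the `transformers` BLEU is measured with
# `sacrebleu` on detokenized outputs. A minimal sketch of that measurement,
# assuming hypothetical `hypotheses`/`references` lists of detokenized strings:
#
# import sacrebleu
# bleu = sacrebleu.corpus_bleu(hypotheses, [references])
# print(round(bleu.score, 2))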
| 29 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline: returns the hidden states of the base
    transformer for a given text.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 28 | 0 |
def print_max_activities(start, finish):
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
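# Worked example of the greedy rule above: with start = [1, 3, 0, 5, 8, 5] and
# finish = [2, 4, 6, 7, 9, 9] (already sorted by finish time), the selected
# activity indices come out as 0, 1, 3, 4.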
if __name__ == "__main__":
import doctest
doctest.testmod()
start = [1, 3, 0, 5, 8, 5]
finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 30 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise

def pre_order(node: TreeNode) -> None:
    """Root -> left subtree -> right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)

def in_order(node: TreeNode) -> None:
    """Left subtree -> root -> right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)

def post_order(node: TreeNode) -> None:
    """Left subtree -> right subtree -> root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")

def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)

def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal printing one level per line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)

def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right

def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right

def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")

def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 28 | 0 |
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit through Reddit's public JSON endpoint."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={'User-agent': 'A random string'},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data['data']['children'][id_]['data'][item] for item in wanted_data
        }
    return data_dict
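# A hedged sketch of retrying on the 429 rate-limit case raised above
# (the attempt count and backoff values are illustrative):
#
# import time
# for attempt in range(3):
#     try:
#         posts = get_subreddit_data("learnpython", wanted_data=["title"])
#         break
#     except requests.HTTPError:
#         time.sleep(2 ** attempt)  # exponential backoff before the next try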
if __name__ == "__main__":
    # If you get Error 429, you are being rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext'])) | 31 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Mimics the CLIP processor while keeping the image-processing step
    differentiable, so gradients can flow back to the pixels.
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE : Optional[Any] = vqgan
else:
SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE : List[str] = clip
else:
SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = iterations
SCREAMING_SNAKE_CASE : Tuple = lr
SCREAMING_SNAKE_CASE : Tuple = log
SCREAMING_SNAKE_CASE : str = make_grid
SCREAMING_SNAKE_CASE : Dict = return_val
SCREAMING_SNAKE_CASE : Union[str, Any] = quantize
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
if output_path is None:
SCREAMING_SNAKE_CASE : int = './animation.gif'
if input_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.save_path
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) )
if not len(A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(A ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A )
SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A )
if extend_frames:
SCREAMING_SNAKE_CASE : List[str] = 1.5
SCREAMING_SNAKE_CASE : int = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(A ) )
imageio.mimsave(A, A, duration=A )
print(F"gif saved to {output_path}" )
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A )
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A )
return z
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent
return self.vqgan.decode(A )
def UpperCamelCase_ ( self, A, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A )
SCREAMING_SNAKE_CASE : str = self.clip(**A )
SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) )
if neg_prompts:
SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] )
else:
SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device )
SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A )
return loss
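        # the loss above is contrastive in spirit: it rewards similarity to the
        # positive prompts (-log pos_logits) and penalizes similarity to the
        # negative ones (+log neg_logits)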
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A )
SCREAMING_SNAKE_CASE : Dict = loop_post_process(A )
SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A )
print('CLIP loss', A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
wandb.init(reinit=A, project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE : Tuple = Image.open(A )
SCREAMING_SNAKE_CASE : int = image.resize((256, 256) )
wandb.log('Original Image', wandb.Image(A ) )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not prompts:
return []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(A, (tuple, list) ):
SCREAMING_SNAKE_CASE : List[str] = prompt[0]
SCREAMING_SNAKE_CASE : Any = float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' )
SCREAMING_SNAKE_CASE : Any = float(A )
else:
SCREAMING_SNAKE_CASE : Dict = prompt
SCREAMING_SNAKE_CASE : List[Any] = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A, device=self.device ),
}
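    # the prompt parser above accepts "a|b|c" strings, (text, weight) tuples, or
    # "text:weight" strings, and normalizes them all into parallel
    # prompt/weight sequences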
def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ):
'''simple docstring'''
if image_path:
SCREAMING_SNAKE_CASE : int = self._get_latent(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(A, A, A )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp()
os.makedirs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(A ) )
SCREAMING_SNAKE_CASE : int = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
| 28 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
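# e.g. NestedDataStructureLike[int] covers a bare int, a list of ints, and a
# dict mapping str -> int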
PathLike = Union[str, bytes, os.PathLike] | 32 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""Wraps multiple `ControlNetModel`s so they can be used as a single model."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )
        return cls(controlnets)
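    # A hedged round-trip sketch of the directory convention documented above
    # (paths and the individual controlnet variables are illustrative):
    #
    # multi = MultiControlNetModel([controlnet_a, controlnet_b])
    # multi.save_pretrained("./mydirectory/controlnet")  # writes controlnet, controlnet_1
    # restored = MultiControlNetModel.from_pretrained("./mydirectory/controlnet")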
| 28 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCamelCase__ : str = """src/transformers"""
# Matches is_xxx_available()
lowerCamelCase__ : List[str] = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase__ : Any = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase__ : Optional[Any] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCamelCase__ : Dict = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase__ : Tuple = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase__ : Dict = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase__ : List[Any] = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase__ : Dict = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase__ : List[Any] = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCamelCase__ : Dict = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowerCamelCase__ : Dict = re.compile(r"""^\s*else:""")
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str:
if _re_test_backend.search(__lowerCAmelCase ) is None:
return None
snake_case__ = [b[0] for b in _re_backend.findall(__lowerCAmelCase )]
backends.sort()
return "_and_".join(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]:
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case__ = f.readlines()
snake_case__ = 0
while line_index < len(__lowerCAmelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__lowerCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
snake_case__ = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
snake_case__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__lowerCAmelCase ):
snake_case__ = _re_one_line_import_struct.search(__lowerCAmelCase ).groups()[0]
snake_case__ = re.findall('''\[([^\]]+)\]''' , __lowerCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
snake_case__ = _re_import_struct_key_value.search(__lowerCAmelCase )
if single_line_import_search is not None:
snake_case__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
snake_case__ = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
snake_case__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
snake_case__ = lines[line_index]
if _re_import_struct_add_one.search(__lowerCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__lowerCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__lowerCAmelCase ) is not None:
snake_case__ = _re_import_struct_add_many.search(__lowerCAmelCase ).groups()[0].split(''', ''' )
snake_case__ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif _re_between_brackets.search(__lowerCAmelCase ) is not None:
snake_case__ = _re_between_brackets.search(__lowerCAmelCase ).groups()[0].split(''', ''' )
snake_case__ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif _re_quote_object.search(__lowerCAmelCase ) is not None:
objects.append(_re_quote_object.search(__lowerCAmelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
snake_case__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
snake_case__ = []
while (
line_index < len(__lowerCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
snake_case__ = lines[line_index]
snake_case__ = _re_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
snake_case__ = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(__lowerCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
snake_case__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
snake_case__ = lines[line_index]
snake_case__ = _re_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
snake_case__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
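# the parser above returns two dicts keyed by backend ("none" for unguarded
# imports): the objects declared in `_import_structure` and the ones imported
# under TYPE_CHECKING; the check below requires the two to match for every key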
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
def find_duplicates(__lowerCAmelCase ):
return [k for k, v in collections.Counter(__lowerCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
snake_case__ = []
for key in import_dict_objects.keys():
snake_case__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
snake_case__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
snake_case__ = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def SCREAMING_SNAKE_CASE ( ) -> str:
snake_case__ = []
for root, _, files in os.walk(__lowerCAmelCase ):
if "__init__.py" in files:
snake_case__ = os.path.join(__lowerCAmelCase , '''__init__.py''' )
snake_case__ = parse_init(__lowerCAmelCase )
if objects is not None:
snake_case__ = analyze_results(*__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case__ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(__lowerCAmelCase ) )
if len(__lowerCAmelCase ) > 0:
raise ValueError('''\n\n'''.join(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
snake_case__ = []
for path, directories, files in os.walk(__lowerCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(__lowerCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__lowerCAmelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
snake_case__ = str((Path(__lowerCAmelCase ) / folder).relative_to(__lowerCAmelCase ) )
snake_case__ = short_path.replace(os.path.sep , '''.''' )
submodules.append(__lowerCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
snake_case__ = str((Path(__lowerCAmelCase ) / fname).relative_to(__lowerCAmelCase ) )
snake_case__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(__lowerCAmelCase )
return submodules
lowerCamelCase__ : Optional[int] = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
# This is to make sure the transformers module imported is the one in the repo.
snake_case__ = importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(__lowerCAmelCase , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
snake_case__ = spec.loader.load_module()
snake_case__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__lowerCAmelCase ) > 0:
snake_case__ = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 33 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Constructs a TVLT audio feature extractor (log-mel spectrogram patches)."""

    model_input_names = ["audio_values", "audio_mask"]
    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
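        # at this point the log-mel values lie in the fixed range [-1.0, 1.0]:
        # dividing by 40 dB clips to [-2, 0], and the +1 shift recentres it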
return log_spec
    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 28 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
UpperCamelCase = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(lowerCamelCase_) , lowerCamelCase_)
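        # flatten_dict joins nested keys with ".", so {"a": {"b": 1}} becomes
        # {"a.b": 1}; the expected dict above spells that mapping out in full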
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = np.random.randn(3 , 4)
self.assertTrue(np.allclose(transpose(lowerCamelCase_) , x.transpose()))
UpperCamelCase = np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0)) , x.transpose((1, 2, 0))))
@require_torch
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_) , transpose(lowerCamelCase_).numpy()))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0)) , transpose(lowerCamelCase_ , axes=(1, 2, 0)).numpy()))
@require_tf
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_) , transpose(lowerCamelCase_).numpy()))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0)) , transpose(lowerCamelCase_ , axes=(1, 2, 0)).numpy()))
@require_flax
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_) , np.asarray(transpose(lowerCamelCase_))))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0)) , np.asarray(transpose(lowerCamelCase_ , axes=(1, 2, 0)))))
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = np.random.randn(3 , 4)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3)) , np.reshape(lowerCamelCase_ , (4, 3))))
UpperCamelCase = np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (1_2, 5)) , np.reshape(lowerCamelCase_ , (1_2, 5))))
@require_torch
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3)) , reshape(lowerCamelCase_ , (4, 3)).numpy()))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (1_2, 5)) , reshape(lowerCamelCase_ , (1_2, 5)).numpy()))
@require_tf
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3)) , reshape(lowerCamelCase_ , (4, 3)).numpy()))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (1_2, 5)) , reshape(lowerCamelCase_ , (1_2, 5)).numpy()))
@require_flax
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3)) , np.asarray(reshape(lowerCamelCase_ , (4, 3)))))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (1_2, 5)) , np.asarray(reshape(lowerCamelCase_ , (1_2, 5)))))
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = np.random.randn(1 , 3 , 4)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_) , np.squeeze(lowerCamelCase_)))
UpperCamelCase = np.random.randn(1 , 4 , 1 , 5)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2) , np.squeeze(lowerCamelCase_ , axis=2)))
@require_torch
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = np.random.randn(1 , 3 , 4)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_) , squeeze(lowerCamelCase_).numpy()))
UpperCamelCase = np.random.randn(1 , 4 , 1 , 5)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2) , squeeze(lowerCamelCase_ , axis=2).numpy()))
@require_tf
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = np.random.randn(1 , 3 , 4)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_) , squeeze(lowerCamelCase_).numpy()))
UpperCamelCase = np.random.randn(1 , 4 , 1 , 5)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2) , squeeze(lowerCamelCase_ , axis=2).numpy()))
@require_flax
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = np.random.randn(1 , 3 , 4)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_) , np.asarray(squeeze(lowerCamelCase_))))
UpperCamelCase = np.random.randn(1 , 4 , 1 , 5)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2) , np.asarray(squeeze(lowerCamelCase_ , axis=2))))
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = np.random.randn(3 , 4)
self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1) , np.expand_dims(lowerCamelCase_ , axis=1)))
@require_torch
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1) , expand_dims(lowerCamelCase_ , axis=1).numpy()))
@require_tf
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1) , expand_dims(lowerCamelCase_ , axis=1).numpy()))
@require_flax
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1) , np.asarray(expand_dims(lowerCamelCase_ , axis=1)))) | 34 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    """Check Prim's MST against the expected edge set on a small graph."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost])
        adjancency[nodeb].append([nodea, cost])
    result = mst(adjancency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
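        # the graph is undirected, so the MST may legitimately report an edge
        # as (u, v) or (v, u); accept either orientation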
assert edge in result or reverse in result
| 28 | 0 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
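# A hedged launch sketch (the --num_folds flag matches the `args.num_folds`
# used below; exact flags depend on this script's argparse section and your
# accelerate setup):
#
# accelerate config                                    # one-time questionnaire
# accelerate launch cross_validation.py --num_folds 5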
a_ :Union[str, Any] = 16
a_ :Any = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Create train/validation/test `DataLoader`s for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader, test_dataloader
def a ( A__ , A__ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# Download the dataset
SCREAMING_SNAKE_CASE__ : List[Any] = load_dataset('''glue''' , '''mrpc''' )
# Create our splits
SCREAMING_SNAKE_CASE__ : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = config['''lr''']
SCREAMING_SNAKE_CASE__ : List[Any] = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(config['''seed'''] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE__ : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : List[Any] = MAX_GPU_BATCH_SIZE
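    # e.g. a config batch_size of 64 with MAX_GPU_BATCH_SIZE = 16 yields
    # gradient_accumulation_steps = 4 and a per-step batch of 16, so the
    # effective batch size stays at 64.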
set_seed(A__ )
# New Code #
# Create our folds:
SCREAMING_SNAKE_CASE__ : Optional[Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
SCREAMING_SNAKE_CASE__ : List[str] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(A__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = get_fold_dataloaders(
A__ , A__ , A__ , A__ , )
        # Instantiate the model (we build the model here so that the seed also controls the new weights' initialization)
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Optional[int] = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=1_0_0 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**A__ )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.loss
SCREAMING_SNAKE_CASE__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**A__ )
SCREAMING_SNAKE_CASE__ : Dict = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A__ , references=A__ , )
SCREAMING_SNAKE_CASE__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A__ )
# New Code #
# We also run predictions on the test set at the very end
SCREAMING_SNAKE_CASE__ : List[str] = []
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**A__ )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.logits
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
        # Concatenate this fold's batched test predictions into a single tensor
        test_predictions.append(torch.cat(A__ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
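    # (The per-fold logits are stacked, summed, and divided by num_folds, i.e. averaged,
    # before the argmax: a simple logit-averaging ensemble across the folds.)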
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat(A__ , dim=0 )
SCREAMING_SNAKE_CASE__ : List[str] = torch.stack(A__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ : List[str] = metric.compute(predictions=A__ , references=A__ )
accelerator.print('''Average test metrics from all folds:''' , A__ )
def a ( ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=A__ , default=A__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=A__ , default=3 , help='''The number of splits to perform across the dataset''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 35 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
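# DiffEdit inference is a three-stage flow, exercised end-to-end by the slow tests below
# (a rough sketch, where `pipe` stands for a loaded StableDiffusionDiffEditPipeline):
#   mask = pipe.generate_mask(image=image, source_prompt=source, target_prompt=target)
#   latents = pipe.invert(prompt=source, image=image).latents
#   edited = pipe(prompt=target, mask_image=mask, image_latents=latents).images[0]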
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : int = StableDiffusionDiffEditPipeline
A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A : str = frozenset(
        [] ) # TODO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : Union[str, Any] = frozenset([] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears'
SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents
SCREAMING_SNAKE_CASE : str = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 28 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase : Optional[int] = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : int = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__lowercase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
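# With _LazyModule, `import transformers.models.tapas` stays cheap: the heavy
# torch/TF submodules are only imported on first attribute access, e.g. when
# `TapasModel` or `TFTapasModel` is actually looked up.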
| 36 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int = 1_00_00_00 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,__UpperCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
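# Example: solution(8) == 21, since the sum of Euler's totient phi(d) for d = 2..8
# is 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, i.e. the count of reduced proper fractions
# n/d with d <= 8 (Project Euler 72).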
if __name__ == "__main__":
print(solution())
| 28 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCamelCase( self : List[Any] ):
a__ : Any = 1
a__ : Optional[int] = 3
a__ : Optional[int] = (32, 32)
a__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase__ )
return image
@property
def _UpperCamelCase( self : Union[str, Any] ):
torch.manual_seed(0 )
a__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _UpperCamelCase( self : Optional[Any] ):
torch.manual_seed(0 )
a__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _UpperCamelCase( self : List[str] ):
torch.manual_seed(0 )
a__ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase__ )
@property
def _UpperCamelCase( self : Union[str, Any] ):
def extract(*lowerCamelCase__ : List[Any] , **lowerCamelCase__ : int ):
class A__ :
"""simple docstring"""
def __init__( self : Tuple ):
a__ : Dict = torch.ones([0] )
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ):
self.pixel_values.to(lowerCamelCase__ )
return self
return Out()
return extract
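    # The `extract` factory above is a stand-in feature extractor: calling it returns
    # an object holding an empty `pixel_values` tensor with a device-move method that
    # returns the same object, so the pipeline can run without real CLIP preprocessing.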
def _UpperCamelCase( self : Tuple ):
a__ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
a__ : Dict = self.dummy_cond_unet
a__ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
a__ : Tuple = self.dummy_vae
a__ : Optional[int] = self.dummy_text_encoder
a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        # assemble the pipeline from the dummy components (a DDIM scheduler in this test)
a__ : Tuple = StableDiffusionPipeline(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , )
a__ : Any = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Tuple = "A painting of a squirrel eating a burger"
a__ : Union[str, Any] = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
a__ : List[Any] = sd_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
a__ : List[Any] = output.images
a__ : Optional[Any] = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
a__ : Dict = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=lowerCamelCase__ , )[0]
a__ : Tuple = image[0, -3:, -3:, -1]
a__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ : Optional[Any] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase( self : Union[str, Any] ):
a__ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
a__ : Optional[Any] = self.dummy_cond_unet
a__ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
a__ : Any = self.dummy_vae
a__ : List[Any] = self.dummy_text_encoder
a__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
a__ : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , )
a__ : int = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Dict = "A painting of a squirrel eating a burger"
a__ : int = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
a__ : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
a__ : int = output.images
a__ : int = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
a__ : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=lowerCamelCase__ , )[0]
a__ : str = image[0, -3:, -3:, -1]
a__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ : Tuple = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase( self : Tuple ):
a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(pipe.scheduler , lowerCamelCase__ )
assert pipe.safety_checker is None
a__ : Optional[int] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
a__ : Optional[Any] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _UpperCamelCase( self : Optional[Any] ):
a__ : List[Any] = self.dummy_cond_unet
a__ : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
a__ : Tuple = self.dummy_vae
a__ : str = self.dummy_text_encoder
a__ : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
a__ : Union[str, Any] = unet.half()
a__ : List[Any] = vae.half()
a__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
a__ : Tuple = StableDiffusionPipeline(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , )
a__ : List[Any] = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : List[str] = "A painting of a squirrel eating a burger"
a__ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Any ):
a__ : Tuple = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=lowerCamelCase__ )
a__ : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a__ : Optional[Any] = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Union[str, Any] = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
a__ : Optional[Any] = 4_003_660_346
a__ : List[str] = 7
# without safety guidance (sld_guidance_scale = 0)
a__ : str = torch.manual_seed(lowerCamelCase__ )
a__ : Any = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
a__ : str = output.images
a__ : List[Any] = image[0, -3:, -3:, -1]
a__ : Tuple = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance enabled (strong configuration, sld_guidance_scale = 2000)
a__ : List[Any] = torch.manual_seed(lowerCamelCase__ )
a__ : Optional[Any] = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a__ : Dict = output.images
a__ : Union[str, Any] = image[0, -3:, -3:, -1]
a__ : Dict = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase( self : Tuple ):
a__ : Tuple = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=lowerCamelCase__ )
a__ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a__ : Optional[int] = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Optional[int] = "padme amidala taking a bath artwork, safe for work, no nudity"
a__ : Tuple = 2_734_971_755
a__ : Union[str, Any] = 7
a__ : List[Any] = torch.manual_seed(lowerCamelCase__ )
a__ : List[str] = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
a__ : Tuple = output.images
a__ : List[str] = image[0, -3:, -3:, -1]
a__ : Optional[Any] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
a__ : Optional[Any] = torch.manual_seed(lowerCamelCase__ )
a__ : List[str] = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a__ : List[Any] = output.images
a__ : Optional[Any] = image[0, -3:, -3:, -1]
a__ : str = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase( self : Optional[Any] ):
a__ : Any = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
a__ : Dict = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Any = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
a__ : Tuple = 1_044_355_234
a__ : List[Any] = 12
a__ : str = torch.manual_seed(lowerCamelCase__ )
a__ : Any = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
a__ : int = output.images
a__ : Optional[int] = image[0, -3:, -3:, -1]
a__ : Tuple = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
a__ : Dict = torch.manual_seed(lowerCamelCase__ )
a__ : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a__ : Dict = output.images
a__ : Union[str, Any] = image[0, -3:, -3:, -1]
a__ : Tuple = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 37 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = LongformerTokenizer
A : List[str] = True
A : Optional[int] = LongformerTokenizerFast
A : Tuple = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
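        # With this toy vocab and the `e r` merge, 'lower' tokenizes to
        # ['l', 'o', 'w', 'er'] and ' newer' to ['\u0120', 'n', 'e', 'w', 'er'],
        # exactly as asserted in the full-tokenizer test below.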
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
            # attention_mask should put 1 everywhere, so the mean (sum divided by length) should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
| 28 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 38 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionXLImgaImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : str = image / 2 + 0.5
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A )
SCREAMING_SNAKE_CASE : str = pipe(**A ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 28 | 0 |
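A note on the generator branching seen in get_dummy_inputs above: on Apple's "mps" backend, older torch releases cannot seed a device-local generator, so tests fall back to seeding the global CPU generator. A minimal standalone sketch of that pattern (the helper name is illustrative, not from the row):

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # mps lacks a seedable device-local generator on older torch builds,
    # so fall back to the default CPU generator there.
    if device.startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
noise = torch.randn((1, 4, 8, 8), generator=gen)  # reproducible draw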
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "data2vec-vision"
def __init__( self : int , _UpperCamelCase : int=7_6_8 , _UpperCamelCase : str=1_2 , _UpperCamelCase : str=1_2 , _UpperCamelCase : int=3_0_7_2 , _UpperCamelCase : List[Any]="gelu" , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : str=0.0 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : Optional[int]=1e-12 , _UpperCamelCase : Optional[int]=2_2_4 , _UpperCamelCase : str=1_6 , _UpperCamelCase : Optional[int]=3 , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : List[Any]=False , _UpperCamelCase : List[Any]=False , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Tuple=True , _UpperCamelCase : Optional[int]=[3, 5, 7, 1_1] , _UpperCamelCase : Dict=[1, 2, 3, 6] , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Dict=0.4 , _UpperCamelCase : Any=2_5_6 , _UpperCamelCase : List[str]=1 , _UpperCamelCase : Dict=False , _UpperCamelCase : Union[str, Any]=2_5_5 , **_UpperCamelCase : Dict , ) ->List[Any]:
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = use_mask_token
snake_case_ = use_absolute_position_embeddings
snake_case_ = use_relative_position_bias
snake_case_ = use_shared_relative_position_bias
snake_case_ = layer_scale_init_value
snake_case_ = drop_path_rate
snake_case_ = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case_ = out_indices
snake_case_ = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = semantic_loss_ignore_index
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = version.parse("1.11" )
@property
def snake_case__( self : List[str] ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case__( self : Tuple ) ->float:
return 1e-4
| 39 |
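The inputs property above returns the same structure that torch.onnx.export expects for its dynamic_axes argument; a rough sketch of the connection (model and dummy input omitted, names illustrative):

from collections import OrderedDict

dynamic_axes = OrderedDict(
    [("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]
)
# torch.onnx.export(model, dummy_input, "model.onnx",
#                   input_names=list(dynamic_axes),
#                   dynamic_axes=dict(dynamic_axes))
print(dict(dynamic_axes))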
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Dict = '''char'''
A : Any = '''bpe'''
A : Dict = '''wp'''
UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''image_processor''', '''char_tokenizer''']
A : int = '''ViTImageProcessor'''
A : List[str] = '''MgpstrTokenizer'''
def __init__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', A, )
SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(A, A )
def __call__( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A )
if text is not None:
SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE : Any = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences
SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' )
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(A ):
SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = final_strs
SCREAMING_SNAKE_CASE : Any = final_scores
SCREAMING_SNAKE_CASE : Dict = char_strs
SCREAMING_SNAKE_CASE : Any = bpe_strs
SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs
return out
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
SCREAMING_SNAKE_CASE : List[Any] = self.char_decode
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : str = '[s]'
elif format == DecodeType.BPE:
SCREAMING_SNAKE_CASE : str = self.bpe_decode
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = '#'
elif format == DecodeType.WORDPIECE:
SCREAMING_SNAKE_CASE : Any = self.wp_decode
SCREAMING_SNAKE_CASE : Tuple = 102
SCREAMING_SNAKE_CASE : List[Any] = '[SEP]'
else:
raise ValueError(F"Format {format} is not supported." )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], []
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 )
SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A )
SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:]
SCREAMING_SNAKE_CASE : List[Any] = decoder(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 )
SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:]
for index in range(A ):
SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A )
SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos]
SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1
SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(A )
conf_scores.append(A )
return dec_strs, conf_scores
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )]
return decode_strs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )]
return decode_strs
| 28 | 0 |
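The batch_decode method above picks, per sample, whichever of the three decoding heads (char / bpe / wordpiece) reports the highest cumulative confidence. The selection rule in isolation, with made-up strings and scores:

def pick_best(candidates):
    # candidates: (decoded_string, confidence) pairs from the three heads
    return max(candidates, key=lambda pair: pair[1])

heads = [("ticket", 0.91), ("ticke#", 0.42), ("tick", 0.77)]
print(pick_best(heads))  # ('ticket', 0.91)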
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : int = PriorTransformer
UpperCAmelCase__ : Optional[Any] = "hidden_states"
@property
def snake_case_ ( self ) -> Any:
UpperCamelCase : List[str] = 4
UpperCamelCase : Dict = 8
UpperCamelCase : int = 7
UpperCamelCase : Dict = floats_tensor((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = floats_tensor((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=0 ) -> Optional[Any]:
torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = 4
UpperCamelCase : int = 8
UpperCamelCase : Tuple = 7
UpperCamelCase : Tuple = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case_ ( self ) -> Optional[Any]:
return (4, 8)
@property
def snake_case_ ( self ) -> str:
return (4, 8)
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : List[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
UpperCamelCase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def snake_case_ ( self ) -> str:
UpperCamelCase , UpperCamelCase : Dict = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy', output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ), 0 )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case_ ( self ) -> int:
UpperCamelCase , UpperCamelCase : Optional[int] = self.prepare_init_args_and_inputs_for_common()
UpperCamelCase : Union[str, Any] = self.model_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Tuple = [*signature.parameters.keys()]
UpperCamelCase : List[Any] = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[Any] = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ )
if hasattr(SCREAMING_SNAKE_CASE_, 'set_default_attn_processor' ):
model.set_default_attn_processor()
UpperCamelCase : int = self.get_dummy_seed_input()
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(**SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase : Dict = output[0, :5].flatten().cpu()
print(SCREAMING_SNAKE_CASE_ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
UpperCamelCase : Optional[int] = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, rtol=1e-2 ) )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=77, SCREAMING_SNAKE_CASE_=0 ) -> Any:
torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = batch_size
UpperCamelCase : Any = embedding_dim
UpperCamelCase : Any = num_embeddings
UpperCamelCase : Dict = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Optional[Any] = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior', subfolder='prior' )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = self.get_dummy_seed_input(seed=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase : List[Any] = model(**SCREAMING_SNAKE_CASE_ )[0]
assert list(sample.shape ) == [1, 768]
UpperCamelCase : List[str] = sample[0, :8].flatten().cpu()
print(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 )
| 40 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ):
"""simple docstring"""
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float()
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 28 | 0 |
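Why the checkpoint keys above come in weight_g / weight_v pairs: weight normalization reparameterizes a weight as g * v / ||v||, so the script calls apply_weight_norm() to make those parameters exist before copying, then remove_weight_norm() to fold them back into a plain weight. A small illustration (layer sizes are made up):

import torch
from torch.nn.utils import weight_norm

conv = weight_norm(torch.nn.Conv1d(80, 512, kernel_size=7))
print(sorted(name for name, _ in conv.named_parameters()))
# -> ['bias', 'weight_g', 'weight_v']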
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCAmelCase__ = 5_0000
lowerCAmelCase__ = 5000
lowerCAmelCase__ , lowerCAmelCase__ = os.path.split(__file__)
lowerCAmelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def _A ( A__ , A__ ):
"""simple docstring"""
for i in range(A__ ):
__lowercase = dataset[i]
@get_duration
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for i in range(0 , len(A__ ) , A__ ):
__lowercase = dataset[i : i + batch_size]
@get_duration
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
with dataset.formatted_as(type=A__ ):
for i in range(A__ ):
__lowercase = dataset[i]
@get_duration
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
with dataset.formatted_as(type=A__ ):
for i in range(0 , A__ , A__ ):
__lowercase = dataset[i : i + batch_size]
def _A ( ):
"""simple docstring"""
__lowercase = {'''num examples''': SPEED_TEST_N_EXAMPLES}
__lowercase = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
__lowercase = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
__lowercase = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
__lowercase = generate_example_dataset(
os.path.join(A__ , '''dataset.arrow''' ) , A__ , num_examples=A__ , seq_shapes={'''list''': (100,)} , )
print('''first set of iterations''' )
for func, kwargs in functions:
print(func.__name__ , str(A__ ) )
__lowercase = func(A__ , **A__ )
print('''shuffling dataset''' )
__lowercase = dataset.shuffle()
print('''Second set of iterations (after shuffling)''' )
for func, kwargs in functions_shuffled:
print('''shuffled ''' , func.__name__ , str(A__ ) )
__lowercase = func(
A__ , **A__ )
with open(A__ , '''wb''' ) as f:
f.write(json.dumps(A__ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 41 |
'''simple docstring'''
from typing import Any
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = data
SCREAMING_SNAKE_CASE : Any = None
def __repr__( self ):
'''simple docstring'''
return F"Node({self.data})"
class _a :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = None
def __iter__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
while node:
yield node.data
SCREAMING_SNAKE_CASE : List[str] = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(item ) for item in self] )
def __getitem__( self, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self, A, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
for _ in range(A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = current.next
SCREAMING_SNAKE_CASE : Any = data
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(len(self ), A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(0, A )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A )
if self.head is None:
SCREAMING_SNAKE_CASE : Optional[int] = new_node
elif index == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head
SCREAMING_SNAKE_CASE : Tuple = new_node
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : str = temp.next
SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next
SCREAMING_SNAKE_CASE : List[str] = new_node
def UpperCamelCase_ ( self ): # print every node data
'''simple docstring'''
print(self )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def UpperCamelCase_ ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase_ ( self, A = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node
if index == 0:
SCREAMING_SNAKE_CASE : List[str] = self.head.next
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : Any = temp.next
SCREAMING_SNAKE_CASE : List[str] = temp.next
SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next
return delete_node.data
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.head is None
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Any = self.head
while current:
# Store the current node's next node.
SCREAMING_SNAKE_CASE : Optional[int] = current.next
# Make the current node's next point backwards
SCREAMING_SNAKE_CASE : int = prev
# Make the previous node be the current node
SCREAMING_SNAKE_CASE : int = current
# Make the current node the next node (to progress iteration)
SCREAMING_SNAKE_CASE : List[Any] = next_node
# Return prev in order to put the head at the end
SCREAMING_SNAKE_CASE : List[Any] = prev
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__UpperCamelCase ) == i
linked_list.insert_nth(__UpperCamelCase ,i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__UpperCamelCase ) == 9
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
for i in range(0 ,9 ):
SCREAMING_SNAKE_CASE : Any = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
SCREAMING_SNAKE_CASE : Optional[int] = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
SCREAMING_SNAKE_CASE : str = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCamelCase )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowercase__( ):
"""simple docstring"""
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE : Dict = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(__UpperCamelCase )
print('\nReading/changing Node data using indexing:' )
print(f"Element at Position 1: {linked_list[1]}" )
SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip()
print('New list:' )
print(__UpperCamelCase )
print(f"length of linked_list is : {len(__UpperCamelCase )}" )
if __name__ == "__main__":
main()
| 28 | 0 |
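The reverse() method above is the standard three-pointer, in-place reversal. Stripped of the class machinery it reads:

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

def reverse(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the rest of the list
        current.next = prev       # flip the link backwards
        prev = current            # advance prev
        current = next_node       # advance current
    return prev  # the old tail is the new head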
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = params
lowerCamelCase_ = np.array(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = np.array([len(SCREAMING_SNAKE_CASE_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Any:
'''simple docstring'''
return len(self.lengths )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.params.max_model_input_size
lowerCamelCase_ = self.lengths > max_len
logger.info(f'''Splitting {sum(SCREAMING_SNAKE_CASE_ )} too long sequences.''' )
def divide_chunks(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return [l[i : i + n] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )]
lowerCamelCase_ = []
lowerCamelCase_ = []
if self.params.mlm:
lowerCamelCase_ ,lowerCamelCase_ = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
lowerCamelCase_ ,lowerCamelCase_ = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowerCamelCase_ = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
lowerCamelCase_ = np.insert(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ )
if sub_s[-1] != sep_id:
lowerCamelCase_ = np.insert(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
assert len(SCREAMING_SNAKE_CASE_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(SCREAMING_SNAKE_CASE_ )
new_tok_ids.extend(SCREAMING_SNAKE_CASE_ )
new_lengths.extend([len(SCREAMING_SNAKE_CASE_ ) for l in sub_seqs] )
lowerCamelCase_ = np.array(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = np.array(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = len(self )
lowerCamelCase_ = self.lengths > 11
lowerCamelCase_ = self.token_ids[indices]
lowerCamelCase_ = self.lengths[indices]
lowerCamelCase_ = len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowerCamelCase_ = self.params.special_tok_ids['unk_token']
lowerCamelCase_ = len(self )
lowerCamelCase_ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowerCamelCase_ = (unk_occs / self.lengths) < 0.5
lowerCamelCase_ = self.token_ids[indices]
lowerCamelCase_ = self.lengths[indices]
lowerCamelCase_ = len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = [t[0] for t in batch]
lowerCamelCase_ = [t[1] for t in batch]
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ )
# Max for paddings
lowerCamelCase_ = max(SCREAMING_SNAKE_CASE_ )
# Pad token ids
if self.params.mlm:
lowerCamelCase_ = self.params.special_tok_ids['pad_token']
else:
lowerCamelCase_ = self.params.special_tok_ids['unk_token']
lowerCamelCase_ = [list(t.astype(SCREAMING_SNAKE_CASE_ ) ) + [pad_idx] * (max_seq_len_ - len(SCREAMING_SNAKE_CASE_ )) for t in token_ids]
assert len(tk_ ) == len(SCREAMING_SNAKE_CASE_ )
assert all(len(SCREAMING_SNAKE_CASE_ ) == max_seq_len_ for t in tk_ )
lowerCamelCase_ = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowerCamelCase_ = torch.tensor(SCREAMING_SNAKE_CASE_ ) # (bs)
return tk_t, lg_t
| 42 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Tuple = min_resolution
SCREAMING_SNAKE_CASE : int = max_resolution
SCREAMING_SNAKE_CASE : Tuple = do_resize
SCREAMING_SNAKE_CASE : Tuple = size
SCREAMING_SNAKE_CASE : Any = do_normalize
SCREAMING_SNAKE_CASE : Optional[int] = image_mean
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std
SCREAMING_SNAKE_CASE : Optional[int] = do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor
SCREAMING_SNAKE_CASE : List[str] = do_pad
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self, A, A=False ):
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0]
if isinstance(A, Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0]
SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A, 'image_mean' ) )
self.assertTrue(hasattr(A, 'image_std' ) )
self.assertTrue(hasattr(A, 'do_normalize' ) )
self.assertTrue(hasattr(A, 'do_resize' ) )
self.assertTrue(hasattr(A, 'size' ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad, A )
SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A, Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A )
SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A )
for image in image_inputs:
self.assertIsInstance(A, np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : int = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[int] = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
| 28 | 0 |
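get_expected_values above implements the usual shortest-edge resize: scale so the smaller side hits the target while keeping the aspect ratio. In isolation:

def resize_shortest_edge(height, width, shortest_edge):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(resize_shortest_edge(480, 640, 18))  # (18, 24)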
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _a ( UpperCamelCase__ ):
_lowercase : List[str] = '''roformer'''
def __init__( self: Optional[int] , UpperCamelCase_: Optional[Any]=50_000 , UpperCamelCase_: Tuple=None , UpperCamelCase_: int=768 , UpperCamelCase_: Any=12 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: Optional[Any]=3_072 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: Tuple=1_536 , UpperCamelCase_: Any=2 , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: Any=1E-1_2 , UpperCamelCase_: Optional[Any]=0 , UpperCamelCase_: str=False , UpperCamelCase_: Optional[int]=True , **UpperCamelCase_: Dict , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = vocab_size
lowercase__ = hidden_size if embedding_size is None else embedding_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = rotary_value
lowercase__ = use_cache
class _a ( UpperCamelCase__ ):
@property
def lowerCamelCase_ ( self: str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__ = {0: '''batch''', 1: '''sequence'''}
lowercase__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 43 |
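The OnnxConfig above swaps axis names by task: multiple-choice inputs carry an extra "choice" dimension. The branch as a standalone function:

from collections import OrderedDict

def dynamic_axes_for(task):
    if task == "multiple-choice":
        axes = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axes = {0: "batch", 1: "sequence"}
    return OrderedDict(
        (name, axes) for name in ("input_ids", "attention_mask", "token_type_ids")
    )

print(dynamic_axes_for("multiple-choice")["input_ids"])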
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)
def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
else:
return _interleave_iterable_datasets(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,):
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
else:
return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
| 28 | 0 |
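The two helpers above back datasets.interleave_datasets and datasets.concatenate_datasets. Typical call shape for the interleave path (tiny made-up datasets; the output order depends on the seed):

from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12]})
mixed = interleave_datasets(
    [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
)
print(mixed["x"])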
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]} | 44 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) )
class _a :
'''simple docstring'''
def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : int = last_hidden_size
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size
SCREAMING_SNAKE_CASE : Optional[Any] = output_stride
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
SCREAMING_SNAKE_CASE : int = model(A, labels=A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
A : List[Any] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A : Optional[int] = False
A : Dict = False
A : List[Any] = False
A : Optional[int] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = 5
self.assertEqual(len(A ), A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : int = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(A, A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[str] = model.to(A )
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, A )
| 28 | 0 |
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the largest sum over all contiguous subsequences (Kadane's algorithm).

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
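# Worked trace (added for illustration): on [-2, 1, -3, 4, -1, 2, 1, -5, 4] the
# best-sum-ending-here values are -2, 1, -2, 4, 3, 5, 6, 1, 5, and the running
# maximum over those is 6, matching the subarray [4, -1, 2, 1].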
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array)) | 45 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
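# A minimal usage sketch (added for illustration; the checkpoint name comes from
# the maps above, and the exact token ids depend on the downloaded vocab):
#
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   encoded = tokenizer("Hello world")  # dict with "input_ids" and "attention_mask"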
| 28 | 0 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
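# (Added note) Shrinking d_model, the layer counts, the FFN dims, and the attention
# heads to the minimum keeps the architecture and the full vocab intact while making
# the weights negligible in size, which is all the test machinery needs.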
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de | 46 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase_ = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock()
SCREAMING_SNAKE_CASE : List[Any] = 500
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Any = HTTPError
SCREAMING_SNAKE_CASE : Any = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=A ) as mock_head:
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )
self.assertIsNotNone(A )
@is_staging_test
class _a ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(
F"{USER}/test-dynamic-image-processor", trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
| 28 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
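# (Added note) This is the Shannon entropy -sum_i p_i * log(p_i) over the last axis;
# unlogit=True squares the raw scores first, and exact zeros are defined to
# contribute 0 (the 0 * log 0 convention).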
def print_ad_tensor(tensor):
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : List[Any]=False ):
__a , __a : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads
__a : str = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
__a : int = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
if head_mask is None:
__a : Union[str, Any] = torch.ones(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCamelCase_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__a : Any = None
__a : Optional[int] = 0.0
__a : Optional[Any] = 0.0
for step, inputs in enumerate(tqdm(lowerCamelCase_ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
__a : Dict = tuple(t.to(args.device ) for t in inputs )
((__a) , ) : Dict = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__a : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ , head_mask=lowerCamelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__a , __a , __a : int = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCamelCase_ ):
__a : List[str] = entropy(attn.detach() , lowerCamelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCamelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__a : Optional[Any] = 2
__a : Union[str, Any] = torch.pow(torch.pow(lowerCamelCase_ , lowerCamelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
__a : List[str] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(lowerCamelCase_ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(lowerCamelCase_ )
logger.info('Head ranked by importance scores' )
__a : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__a : str = torch.arange(
head_importance.numel() , device=args.device )
__a : Tuple = head_ranks.view_as(lowerCamelCase_ )
print_ad_tensor(lowerCamelCase_ )
return attn_entropy, head_importance, total_loss
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ):
__a , __a , __a : Optional[int] = compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ )
__a : Tuple = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , lowerCamelCase_ , original_score * args.masking_threshold )
__a : Tuple = torch.ones_like(lowerCamelCase_ )
__a : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__a : Tuple = original_score
while current_score >= original_score * args.masking_threshold:
__a : Optional[Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__a : List[str] = float('Inf' )
__a : List[Any] = head_importance.view(-1 ).sort()[1]
if len(lowerCamelCase_ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
__a : Any = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
__a : int = new_head_mask.view(-1 )
__a : Tuple = 0.0
__a : int = new_head_mask.view_as(lowerCamelCase_ )
__a : Optional[int] = new_head_mask.clone().detach()
print_ad_tensor(lowerCamelCase_ )
# Compute metric and head importance again
__a , __a , __a : int = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , head_mask=lowerCamelCase_ )
__a : List[Any] = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , lowerCamelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('Final head mask' )
print_ad_tensor(lowerCamelCase_ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
__a : List[Any] = datetime.now()
__a , __a , __a : List[str] = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ )
__a : List[str] = 1 / loss
__a : List[Any] = datetime.now() - before_time
__a : List[str] = sum(p.numel() for p in model.parameters() )
__a : Dict = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCamelCase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__a : Tuple = [
v,
]
assert sum(len(lowerCamelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCamelCase_ )
__a : Optional[Any] = sum(p.numel() for p in model.parameters() )
__a : Tuple = datetime.now()
__a , __a , __a : Tuple = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ , actually_pruned=lowerCamelCase_ , )
__a : Optional[Any] = 1 / loss
__a : List[Any] = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , lowerCamelCase_ , lowerCamelCase_ , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , lowerCamelCase_ , lowerCamelCase_ )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
save_model(lowerCamelCase_ , args.output_dir )
def UpperCAmelCase__ ( ):
__a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=lowerCamelCase_ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=lowerCamelCase_ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=lowerCamelCase_ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=lowerCamelCase_ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=lowerCamelCase_ , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=lowerCamelCase_ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=lowerCamelCase_ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=lowerCamelCase_ , help='Batch size.' )
parser.add_argument('--seed' , type=lowerCamelCase_ , default=4_2 )
parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' )
__a : Optional[Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__a : List[str] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
__a : Tuple = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__a : Union[str, Any] = torch.device('cuda' , args.local_rank )
__a : Any = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__a : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__a : List[Any] = nn.parallel.DistributedDataParallel(
lowerCamelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase_ )
elif args.n_gpu > 1:
__a : Union[str, Any] = nn.DataParallel(lowerCamelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ )
torch.save(lowerCamelCase_ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , lowerCamelCase_ )
# Prepare dataset
__a : Tuple = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__a : str = (torch.from_numpy(lowerCamelCase_ ),)
__a : List[str] = TensorDataset(*lowerCamelCase_ )
__a : Optional[Any] = RandomSampler(lowerCamelCase_ )
__a : Union[str, Any] = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__a : Union[str, Any] = mask_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
prune_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
main()
| 47 |
'''simple docstring'''
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
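# (Added note) An in-order walk of a binary search tree visits keys in ascending
# order, so insert-all-then-traverse sorts the input: O(n log n) on average, while a
# pre-sorted input degenerates the tree into a linked list and costs O(n^2).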
if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 28 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
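# (Added note) _LazyModule registers the names listed in _import_structure on the
# package but defers importing the heavy submodules until one of those attributes is
# first accessed, which keeps importing the package itself cheap.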
| 48 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
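# A minimal usage sketch (added for illustration; the argument values below are
# hypothetical, not taken from this repository):
#
#   def step(self, generator=None, **kwargs):
#       seed = deprecate("seed", "0.20.0", "Please use `generator` instead.", take_from=kwargs)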
| 28 | 0 |
"""simple docstring"""
def is_unique(input_str: str) -> bool:
    """Return True if every character in ``input_str`` occurs at most once.

    >>> is_unique("abcdef")
    True
    >>> is_unique("hello")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
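# (Added note) The bitmap keeps one bit per code point: bit ord(ch) records whether
# ch was already seen. After "ab" the map is 0b11 << 97 (ord("a") == 97), so a
# repeated "a" makes `bitmap >> 97 & 1` equal 1 and the function returns False.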
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
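# (Added note) slerp moves along the great circle between v0 and v1 at constant
# angular speed, with weights sin((1 - t) * theta) / sin(theta) and
# sin(t * theta) / sin(theta); for nearly parallel vectors (|dot| > DOT_THRESHOLD)
# sin(theta) is close to 0, so plain linear interpolation is used instead.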
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,):
super().__init__()
self.register_modules(
vae=_lowerCAmelCase ,text_encoder=_lowerCAmelCase ,clip_model=_lowerCAmelCase ,tokenizer=_lowerCAmelCase ,unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ,coca_model=_lowerCAmelCase ,coca_tokenizer=_lowerCAmelCase ,coca_transform=_lowerCAmelCase ,)
lowerCamelCase__ = (
feature_extractor.size
if isinstance(feature_extractor.size ,_lowerCAmelCase )
else feature_extractor.size["""shortest_edge"""]
)
lowerCamelCase__ = transforms.Normalize(mean=feature_extractor.image_mean ,std=feature_extractor.image_std )
set_requires_grad(self.text_encoder ,_lowerCAmelCase )
set_requires_grad(self.clip_model ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def UpperCamelCase_ ( self ):
self.enable_attention_slicing(_lowerCAmelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.vae ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.vae ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.unet ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.unet ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
# get the original timestep using init_timestep
lowerCamelCase__ = min(int(num_inference_steps * strength ) ,_lowerCAmelCase )
lowerCamelCase__ = max(num_inference_steps - init_timestep ,0 )
lowerCamelCase__ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ):
if not isinstance(_lowerCAmelCase ,torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(_lowerCAmelCase )}''' )
lowerCamelCase__ = image.to(device=_lowerCAmelCase ,dtype=_lowerCAmelCase )
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowerCAmelCase )
]
lowerCamelCase__ = torch.cat(_lowerCAmelCase ,dim=0 )
else:
lowerCamelCase__ = self.vae.encode(_lowerCAmelCase ).latent_dist.sample(_lowerCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase__ = 0.1_8215 * init_latents
lowerCamelCase__ = init_latents.repeat_interleave(_lowerCAmelCase ,dim=0 )
lowerCamelCase__ = randn_tensor(init_latents.shape ,generator=_lowerCAmelCase ,device=_lowerCAmelCase ,dtype=_lowerCAmelCase )
# get latents
lowerCamelCase__ = self.scheduler.add_noise(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = init_latents
return latents
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = self.coca_transform(_lowerCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowerCamelCase__ = self.coca_model.generate(transformed_image.to(device=self.device ,dtype=self.coca_model.dtype ) )
lowerCamelCase__ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" ,"""""" ).rstrip(""" .,""" )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.feature_extractor.preprocess(_lowerCAmelCase )
lowerCamelCase__ = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
lowerCamelCase__ = self.clip_model.get_image_features(_lowerCAmelCase )
lowerCamelCase__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=_lowerCAmelCase )
lowerCamelCase__ = image_embeddings_clip.repeat_interleave(_lowerCAmelCase ,dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
lowerCamelCase__ = latents.detach().requires_grad_()
lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase )
# predict the noise residual
lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ).sample
if isinstance(self.scheduler ,(PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowerCamelCase__ = self.scheduler.alphas_cumprod[timestep]
lowerCamelCase__ = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase__ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
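# blend the x0 estimate back with the current latents, weighted by fac = sqrt(1 - alpha_bar_t):
# the noisier the step, the more the decoded preview below leans on the predicted clean image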
lowerCamelCase__ = torch.sqrt(_lowerCAmelCase )
lowerCamelCase__ = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler ,_lowerCAmelCase ):
lowerCamelCase__ = self.scheduler.sigmas[index]
lowerCamelCase__ = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase__ = 1 / 0.1_8215 * sample
lowerCamelCase__ = self.vae.decode(_lowerCAmelCase ).sample
lowerCamelCase__ = (image / 2 + 0.5).clamp(0 ,1 )
lowerCamelCase__ = transforms.Resize(self.feature_extractor_size )(_lowerCAmelCase )
lowerCamelCase__ = self.normalize(_lowerCAmelCase ).to(latents.dtype )
lowerCamelCase__ = self.clip_model.get_image_features(_lowerCAmelCase )
lowerCamelCase__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=_lowerCAmelCase )
lowerCamelCase__ = spherical_dist_loss(_lowerCAmelCase ,_lowerCAmelCase ).mean() * clip_guidance_scale
lowerCamelCase__ = -torch.autograd.grad(_lowerCAmelCase ,_lowerCAmelCase )[0]
if isinstance(self.scheduler ,_lowerCAmelCase ):
lowerCamelCase__ = latents.detach() + grads * (sigma**2)
lowerCamelCase__ = noise_pred_original
else:
lowerCamelCase__ = noise_pred_original - torch.sqrt(_lowerCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 0.6 ,_lowerCAmelCase = 50 ,_lowerCAmelCase = 7.5 ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 0.0 ,_lowerCAmelCase = 1_00 ,_lowerCAmelCase = None ,_lowerCAmelCase = "pil" ,_lowerCAmelCase = True ,_lowerCAmelCase = 0.8 ,_lowerCAmelCase = 0.1 ,_lowerCAmelCase = 0.1 ,):
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(_lowerCAmelCase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_lowerCAmelCase ,torch.Generator ) and batch_size > 1:
lowerCamelCase__ = [generator] + [None] * (batch_size - 1)
lowerCamelCase__ = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
lowerCamelCase__ = [x[0] for x in coca_is_none if x[1]]
lowerCamelCase__ = """, """.join(_lowerCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
lowerCamelCase__ = self.get_image_description(_lowerCAmelCase )
if style_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
lowerCamelCase__ = self.get_image_description(_lowerCAmelCase )
# get prompt text embeddings for content and style
lowerCamelCase__ = self.tokenizer(
_lowerCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=_lowerCAmelCase ,return_tensors="""pt""" ,)
lowerCamelCase__ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase__ = self.tokenizer(
_lowerCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=_lowerCAmelCase ,return_tensors="""pt""" ,)
lowerCamelCase__ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase__ = slerp(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# duplicate text embeddings for each generation per prompt
lowerCamelCase__ = text_embeddings.repeat_interleave(_lowerCAmelCase ,dim=0 )
# set timesteps
lowerCamelCase__ = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCamelCase__ = {}
if accepts_offset:
lowerCamelCase__ = 1
self.scheduler.set_timesteps(_lowerCAmelCase ,**_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps = self.scheduler.timesteps.to(self.device )
lowerCamelCase__ , lowerCamelCase__ = self.get_timesteps(_lowerCAmelCase ,_lowerCAmelCase ,self.device )
lowerCamelCase__ = timesteps[:1].repeat(_lowerCAmelCase )
# Preprocess image
lowerCamelCase__ = preprocess(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self.prepare_latents(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,text_embeddings.dtype ,self.device ,_lowerCAmelCase )
lowerCamelCase__ = preprocess(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self.prepare_latents(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,text_embeddings.dtype ,self.device ,_lowerCAmelCase )
lowerCamelCase__ = slerp(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
if clip_guidance_scale > 0:
lowerCamelCase__ = self.get_clip_image_embeddings(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self.get_clip_image_embeddings(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = slerp(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase__ = content_text_input.input_ids.shape[-1]
lowerCamelCase__ = self.tokenizer([""""""] ,padding="""max_length""" ,max_length=_lowerCAmelCase ,return_tensors="""pt""" )
lowerCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase__ = uncond_embeddings.repeat_interleave(_lowerCAmelCase ,dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase__ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device="""cpu""" ,dtype=_lowerCAmelCase ).to(
self.device )
else:
lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=self.device ,dtype=_lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase__ = {}
if accepts_eta:
lowerCamelCase__ = eta
# check if the scheduler accepts generator
lowerCamelCase__ = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCamelCase__ = generator
with self.progress_bar(total=_lowerCAmelCase ):
for i, t in enumerate(_lowerCAmelCase ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase )
# predict the noise residual
lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase__ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase__ , lowerCamelCase__ = self.cond_fn(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ = self.scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase__ = 1 / 0.1_8215 * latents
lowerCamelCase__ = self.vae.decode(_lowerCAmelCase ).sample
lowerCamelCase__ = (image / 2 + 0.5).clamp(0 ,1 )
lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowerCamelCase__ = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_lowerCAmelCase ,nsfw_content_detected=_lowerCAmelCase )
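# `spherical_dist_loss` is used in `cond_fn` above but is not defined in this excerpt.
# A minimal sketch of the usual implementation (squared great-circle distance between
# L2-normalized embeddings); the helper name and the use of torch.nn.functional are
# assumptions based on the call site:
import torch.nn.functional as F

def spherical_dist_loss(x, y):
    # project both embedding batches onto the unit hypersphere
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    # chord length -> half-angle via arcsin -> squared geodesic distance
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)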
| 50 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE : int = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] , a__ : str , ):
UpperCAmelCase = parent
UpperCAmelCase = 13
UpperCAmelCase = 7
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = 2
UpperCAmelCase = 99
UpperCAmelCase = 0
UpperCAmelCase = 32
UpperCAmelCase = 2
UpperCAmelCase = 4
UpperCAmelCase = 0.1
UpperCAmelCase = 0.1
UpperCAmelCase = 512
UpperCAmelCase = 16
UpperCAmelCase = 2
UpperCAmelCase = 0.02
UpperCAmelCase = 3
UpperCAmelCase = 4
UpperCAmelCase = '''last'''
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = 0
def __snake_case ( self : str ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCAmelCase = None
if self.use_input_lengths:
UpperCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __snake_case ( self : Dict , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Optional[int] , a__ : int , a__ : Tuple , a__ : Optional[Any] , a__ : Union[str, Any] , ):
UpperCAmelCase = TFFlaubertModel(config=a__ )
UpperCAmelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase = model(a__ )
UpperCAmelCase = [input_ids, input_mask]
UpperCAmelCase = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : str , a__ : Tuple , a__ : Tuple , a__ : Optional[int] , a__ : str , a__ : Any , a__ : List[str] , a__ : str , a__ : List[str] , a__ : int , ):
UpperCAmelCase = TFFlaubertWithLMHeadModel(a__ )
UpperCAmelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : List[Any] , a__ : Optional[int] , a__ : List[Any] , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : int , a__ : int , ):
UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(a__ )
UpperCAmelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase = model(a__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : Union[str, Any] , a__ : Optional[int] , a__ : Optional[Any] , a__ : Tuple , a__ : Dict , a__ : Dict , a__ : List[Any] , a__ : List[Any] , a__ : List[str] , a__ : Union[str, Any] , ):
UpperCAmelCase = TFFlaubertForSequenceClassification(a__ )
UpperCAmelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case ( self : Any , a__ : Optional[int] , a__ : Dict , a__ : Any , a__ : Optional[Any] , a__ : str , a__ : Optional[int] , a__ : Any , a__ : Any , a__ : List[Any] , ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFFlaubertForTokenClassification(config=a__ )
UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : Union[str, Any] , a__ : Optional[Any] , a__ : Any , a__ : Any , a__ : Optional[int] , a__ : str , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : Tuple , a__ : Tuple , ):
UpperCAmelCase = self.num_choices
UpperCAmelCase = TFFlaubertForMultipleChoice(config=a__ )
UpperCAmelCase = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : int ):
UpperCAmelCase = self.prepare_config_and_inputs()
(
UpperCAmelCase,
UpperCAmelCase,
UpperCAmelCase,
UpperCAmelCase,
UpperCAmelCase,
UpperCAmelCase,
UpperCAmelCase,
UpperCAmelCase,
UpperCAmelCase,
) = config_and_inputs
UpperCAmelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase =(
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase =(
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase =False
_lowerCamelCase =False
def __snake_case ( self : Union[str, Any] , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : str , a__ : Optional[int] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __snake_case ( self : int ):
UpperCAmelCase = TFFlaubertModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ , emb_dim=37 )
def __snake_case ( self : Optional[int] ):
self.config_tester.run_common_tests()
def __snake_case ( self : List[str] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a__ )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a__ )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a__ )
def __snake_case ( self : List[str] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*a__ )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*a__ )
@slow
def __snake_case ( self : Dict ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFFlaubertModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
UpperCAmelCase = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCAmelCase = model(a__ )[0]
UpperCAmelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , a__ )
# compare the actual values for a slice.
UpperCAmelCase = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 51 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if tokenize_kwargs is None:
SCREAMING_SNAKE_CASE : Optional[int] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
SCREAMING_SNAKE_CASE : Tuple = truncation
SCREAMING_SNAKE_CASE : int = tokenize_kwargs
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Optional[int] = return_tensors
return preprocess_params, {}, postprocess_params
def UpperCamelCase_ ( self, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.framework
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(A, return_tensors=A, **A )
return model_inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model(**A )
return model_outputs
def UpperCamelCase_ ( self, A, A=False ):
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self, *A, **A ):
'''simple docstring'''
return super().__call__(*A, **A )
| 28 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__lowerCAmelCase = Features({'''audio''': Audio()} )
__lowerCAmelCase = Features({'''labels''': ClassLabel} )
__lowerCAmelCase = "audio"
__lowerCAmelCase = "labels"
def _lowerCamelCase ( self , _UpperCAmelCase ):
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _UpperCAmelCase ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
__a : int = copy.deepcopy(self )
__a : Any = self.label_schema.copy()
__a : Tuple = features[self.label_column]
__a : List[str] = label_schema
return task_template
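# hypothetical usage sketch (upstream this method is `align_with_features` on the
# `AudioClassification` template; both names are obfuscated above):
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = AudioClassification().align_with_features(features)  # copies the ClassLabel into the label schema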
@property
def _lowerCamelCase ( self ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 52 |
'''simple docstring'''
from __future__ import annotations
import queue
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = data
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
def lowercase__( ):
"""simple docstring"""
print('\n********Enter "n" at any prompt to stop********\n' )
SCREAMING_SNAKE_CASE : str = input('Enter the value of the root node: ' ).strip().lower()
SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue()
SCREAMING_SNAKE_CASE : Dict = TreeNode(int(__UpperCamelCase ) )
q.put(__UpperCamelCase )
while not q.empty():
SCREAMING_SNAKE_CASE : List[Any] = q.get()
SCREAMING_SNAKE_CASE : Optional[int] = f"Enter the left node of {node_found.data}: "
SCREAMING_SNAKE_CASE : Any = input(__UpperCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE : str = TreeNode(int(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Any = left_node
q.put(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = f"Enter the right node of {node_found.data}: "
SCREAMING_SNAKE_CASE : Dict = input(__UpperCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE : Optional[int] = TreeNode(int(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Any = right_node
q.put(__UpperCamelCase )
raise  # for the type checker only: the loop above can only exit via the returns once "n" is entered
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
print(node.data ,end=',' )
pre_order(node.left )
pre_order(node.right )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
in_order(node.left )
print(node.data ,end=',' )
in_order(node.right )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data ,end=',' )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue()
q.put(__UpperCamelCase )
while not q.empty():
SCREAMING_SNAKE_CASE : Optional[int] = q.get()
print(node_dequeued.data ,end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue()
q.put(__UpperCamelCase )
while not q.empty():
SCREAMING_SNAKE_CASE : Union[str, Any] = []
while not q.empty():
SCREAMING_SNAKE_CASE : List[Any] = q.get()
print(node_dequeued.data ,end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__UpperCamelCase )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : list[TreeNode] = []
SCREAMING_SNAKE_CASE : Optional[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data ,end=',' )
stack.append(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = n.left
# end of while means current node doesn't have left child
SCREAMING_SNAKE_CASE : List[Any] = stack.pop()
# start to traverse its right child
SCREAMING_SNAKE_CASE : Any = n.right
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : list[TreeNode] = []
SCREAMING_SNAKE_CASE : int = node
while n or stack:
while n:
stack.append(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = n.left
SCREAMING_SNAKE_CASE : Tuple = stack.pop()
print(n.data ,end=',' )
SCREAMING_SNAKE_CASE : str = n.right
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = [], []
SCREAMING_SNAKE_CASE : Optional[int] = node
stacka.append(__UpperCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
SCREAMING_SNAKE_CASE : Optional[int] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__UpperCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data ,end=',' )
def lowercase__( __UpperCamelCase: str = "" ,__UpperCamelCase: int = 50 ,__UpperCamelCase: str = "*" ):
"""simple docstring"""
if not s:
return "\n" + width * char
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = divmod(width - len(__UpperCamelCase ) - 2 ,2 )
return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
UpperCamelCase_ = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 28 | 0 |
from math import isclose, sqrt
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float, lowerCAmelCase_ : float ):
__lowerCAmelCase = point_y / 4 / point_x
__lowerCAmelCase = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__lowerCAmelCase = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__lowerCAmelCase = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
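# derivation: substituting y = m*x + (b - m*a), with m = outgoing_gradient and
# (a, b) = (point_x, point_y), into y^2 + 4x^2 = 100 gives
# (m^2 + 4)*x^2 + 2*m*(b - m*a)*x + (b - m*a)^2 - 100 = 0, i.e. the A, B, C computed below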
__lowerCAmelCase = outgoing_gradient**2 + 4
__lowerCAmelCase = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__lowerCAmelCase = (point_y - outgoing_gradient * point_x) ** 2 - 100
__lowerCAmelCase = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__lowerCAmelCase = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__lowerCAmelCase = x_minus if isclose(lowerCAmelCase_, lowerCAmelCase_ ) else x_plus
__lowerCAmelCase = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def a_ ( lowerCAmelCase_ : float = 1.4, lowerCAmelCase_ : float = -9.6 ):
__lowerCAmelCase = 0
__lowerCAmelCase = first_x_coord
__lowerCAmelCase = first_y_coord
__lowerCAmelCase = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = next_point(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 53 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _a :
'''simple docstring'''
def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = device
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std )
SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 )
SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.resize(A )
SCREAMING_SNAKE_CASE : Any = self.center_crop(A )
SCREAMING_SNAKE_CASE : str = self.normalize(A )
return images
def __call__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A )
SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE : Optional[Any] = vqgan
else:
SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE : List[str] = clip
else:
SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = iterations
SCREAMING_SNAKE_CASE : Tuple = lr
SCREAMING_SNAKE_CASE : Tuple = log
SCREAMING_SNAKE_CASE : str = make_grid
SCREAMING_SNAKE_CASE : Dict = return_val
SCREAMING_SNAKE_CASE : Union[str, Any] = quantize
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
if output_path is None:
SCREAMING_SNAKE_CASE : int = './animation.gif'
if input_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.save_path
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) )
if not len(A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(A ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A )
SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A )
if extend_frames:
SCREAMING_SNAKE_CASE : List[str] = 1.5
SCREAMING_SNAKE_CASE : int = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(A ) )
imageio.mimsave(A, A, duration=A )
print(F"gif saved to {output_path}" )
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A )
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A )
return z
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent
return self.vqgan.decode(A )
def UpperCamelCase_ ( self, A, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A )
SCREAMING_SNAKE_CASE : str = self.clip(**A )
SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) )
if neg_prompts:
SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] )
else:
SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device )
SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A )
return loss
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A )
SCREAMING_SNAKE_CASE : Dict = loop_post_process(A )
SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A )
print('CLIP loss', A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
wandb.init(reinit=A, project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE : Tuple = Image.open(A )
SCREAMING_SNAKE_CASE : int = image.resize((256, 256) )
wandb.log({'Original Image': wandb.Image(A )} )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not prompts:
return []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(A, (tuple, list) ):
SCREAMING_SNAKE_CASE : List[str] = prompt[0]
SCREAMING_SNAKE_CASE : Any = float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' )
SCREAMING_SNAKE_CASE : Any = float(A )
else:
SCREAMING_SNAKE_CASE : Dict = prompt
SCREAMING_SNAKE_CASE : List[Any] = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A, device=self.device ),
}
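# worked example (the method name is obfuscated above; upstream it is `process_prompts`):
# "a smiling face:2|blue eyes" -> {"prompts": ["a smiling face", "blue eyes"],
# "weights": tensor([2.0, 1.0])}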
def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ):
'''simple docstring'''
if image_path:
SCREAMING_SNAKE_CASE : int = self._get_latent(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(A, A, A )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp()
os.makedirs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(A ) )
SCREAMING_SNAKE_CASE : int = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
| 28 | 0 |
def a__ ( lowercase__ ):
'''simple docstring'''
if not head:
return True
# split the list to two parts
UpperCAmelCase_ , UpperCAmelCase_ =head.next, head
while fast and fast.next:
UpperCAmelCase_ =fast.next.next
UpperCAmelCase_ =slow.next
UpperCAmelCase_ =slow.next
UpperCAmelCase_ =None # detach the first half from the second; optional, since the comparison loop below stops at the shorter half anyway
# reverse the second part
UpperCAmelCase_ =None
while second:
UpperCAmelCase_ =second.next
UpperCAmelCase_ =node
UpperCAmelCase_ =second
UpperCAmelCase_ =nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
UpperCAmelCase_ =node.next
UpperCAmelCase_ =head.next
return True
def a__ ( lowercase__ ):
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCAmelCase_ =UpperCAmelCase_ =UpperCAmelCase_ =head
while fast and fast.next:
UpperCAmelCase_ , UpperCAmelCase_ =fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase_ =[slow.val]
while slow.next:
UpperCAmelCase_ =slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase_ =cur.next
return True
def a__ ( lowercase__ ):
'''simple docstring'''
if not head or not head.next:
return True
UpperCAmelCase_ ={}
UpperCAmelCase_ =0
while head:
if head.val in d:
d[head.val].append(lowercase__ )
else:
UpperCAmelCase_ =[pos]
UpperCAmelCase_ =head.next
pos += 1
UpperCAmelCase_ =pos - 1
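# idea: a palindrome pairs position i with position (length - 1 - i), so for every value the
# list of its positions must be symmetric, v[i] + v[len(v) - 1 - i] == length - 1, and at most
# one value (the middle of an odd-length list) may occur an odd number of times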
UpperCAmelCase_ =0
for v in d.values():
if len(lowercase__ ) % 2 != 0:
middle += 1
else:
UpperCAmelCase_ =0
for i in range(0 , len(lowercase__ ) ):
if v[i] + v[len(lowercase__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
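# The three variants above assume a singly linked node type exposing `val` and `next`,
# which is not defined in this file; a minimal sketch plus a smoke test. Note that all
# three functions are obfuscated to the same name `a__`, so only the last (dict-based)
# variant is callable here:
class _ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt

def _from_values(values):
    # build a linked list front-to-back from a Python list
    head = None
    for v in reversed(values):
        head = _ListNode(v, head)
    return head

assert a__(_from_values([1, 2, 2, 1])) is True
assert a__(_from_values([1, 2, 3])) is False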
| 54 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A )
def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet(
A, A, A, A, A, A, A, A, A, A, A, )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A, A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, )
idx += 1
SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}"
@classmethod
def UpperCamelCase_ ( cls, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path
while os.path.isdir(A ):
SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A )
controlnets.append(A )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}"
logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." )
if len(A ) == 0:
raise ValueError(
F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(A )
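# hypothetical round trip (upstream the class is `MultiControlNetModel`; the name is obfuscated here):
#   multi = MultiControlNetModel([controlnet_a, controlnet_b])
#   multi.save_pretrained("./mydirectory/controlnet")  # writes ./mydirectory/controlnet and ./mydirectory/controlnet_1
#   restored = MultiControlNetModel.from_pretrained("./mydirectory/controlnet")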
| 28 | 0 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
__A = torch.nn.Linear(2 , 4 )
__A = torch.optim.AdamW(model.parameters() , lr=1.0 )
__A = torch.optim.lr_scheduler.OneCycleLR(a_ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__A = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__A = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
__A = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(a_ )
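# `get_signature` collapses all model parameters to one scalar, so the checkpoint tests below can
# assert that randomizing the weights moves the scalar by more than 1e-3 and that loading the
# saved state brings it back to within 1e-3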
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_cuda
def UpperCamelCase_ ( self : Dict ):
__A = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(A ):
__A = Accelerator(cpu=A )
def UpperCamelCase_ ( self : Dict ):
__A = Accelerator()
__A = GradientState()
assert state.num_steps == 1
__A = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__A = False
assert state.sync_gradients is False
GradientState._reset_state()
def UpperCamelCase_ ( self : str ):
__A = Accelerator()
__A , __A , __A , __A , __A = create_components()
(
__A,
__A,
__A,
__A,
__A,
) = accelerator.prepare(A ,A ,A ,A ,A )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def UpperCamelCase_ ( self : Optional[int] ):
__A = Accelerator()
__A , __A , __A , __A , __A = create_components()
accelerator.prepare(A ,A ,A ,A ,A )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def UpperCamelCase_ ( self : str ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*A : int ,**A : List[str] ):
pass
with patch("torch.cuda.set_device" ,A ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
__A = Accelerator()
self.assertEqual(str(accelerator.state.device ) ,"cuda:64" )
def UpperCamelCase_ ( self : Tuple ):
__A = Accelerator()
__A , __A , __A , __A , __A = create_components()
accelerator.prepare(A ,A ,A ,A ,A )
__A = get_signature(A )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(A )
# make sure random weights don't match
load_random_weights(A )
self.assertTrue(abs(model_signature - get_signature(A ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(A )
self.assertTrue(abs(model_signature - get_signature(A ) ) < 1E-3 )
def UpperCamelCase_ ( self : Dict ):
__A = Accelerator()
__A , __A , __A , __A , __A = create_components()
accelerator.prepare(A ,A ,A ,A ,A )
__A = get_signature(A )
# saving hook
def save_config(A : Optional[int] ,A : List[Any] ,A : int ):
__A = {"class_name": models[0].__class__.__name__}
with open(os.path.join(A ,"data.json" ) ,"w" ) as f:
json.dump(A ,A )
# loading hook
def load_config(A : List[Any] ,A : Any ):
with open(os.path.join(A ,"data.json" ) ,"r" ) as f:
__A = json.load(A )
__A = config["class_name"]
__A = accelerator.register_save_state_pre_hook(A )
__A = accelerator.register_load_state_pre_hook(A )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(A )
# make sure random weights don't match with hooks
load_random_weights(A )
self.assertTrue(abs(model_signature - get_signature(A ) ) > 1E-3 )
# random class name to verify correct one is loaded
__A = "random"
# make sure loaded weights match with hooks
accelerator.load_state(A )
self.assertTrue(abs(model_signature - get_signature(A ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(A )
# make sure random weights don't match with hooks removed
load_random_weights(A )
self.assertTrue(abs(model_signature - get_signature(A ) ) > 1E-3 )
# random class name to verify correct one is loaded
__A = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(A )
self.assertTrue(abs(model_signature - get_signature(A ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = Accelerator()
__A , __A , __A , __A , __A = create_components()
__A = None
# This should work
__A , __A , __A , __A , __A , __A = accelerator.prepare(
A ,A ,A ,A ,A ,A )
self.assertTrue(dummy_obj is None )
def UpperCamelCase_ ( self : Optional[int] ):
__A = Accelerator()
__A , __A , __A , __A , __A = create_components()
__A = [1, 2, 3]
# This should work
__A , __A , __A , __A , __A , __A = accelerator.prepare(
A ,A ,A ,A ,A ,A )
self.assertEqual(
getattr(A ,"_is_accelerate_prepared" ,A ) ,A ,"Dummy object should have `_is_accelerate_prepared` set to `True`" ,)
self.assertEqual(
getattr(A ,"_is_accelerate_prepared" ,A ) ,A ,"Model is missing `_is_accelerator_prepared` or is set to `False`" ,)
self.assertEqual(
getattr(A ,"_is_accelerate_prepared" ,A ) ,A ,"Optimizer is missing `_is_accelerator_prepared` or is set to `False`" ,)
self.assertEqual(
getattr(A ,"_is_accelerate_prepared" ,A ) ,A ,"Scheduler is missing `_is_accelerator_prepared` or is set to `False`" ,)
self.assertEqual(
getattr(A ,"_is_accelerate_prepared" ,A ) ,A ,"Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" ,)
self.assertEqual(
getattr(A ,"_is_accelerate_prepared" ,A ) ,A ,"Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" ,)
@slow
@require_bnb
def UpperCamelCase_ ( self : Tuple ):
from transformers import AutoModelForCausalLM
__A = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" ,load_in_abit=A ,device_map={"": 0} ,)
__A = Accelerator()
# This should work
__A = accelerator.prepare(A )
@slow
@require_bnb
def UpperCamelCase_ ( self : Any ):
from transformers import AutoModelForCausalLM
__A = Accelerator()
with init_empty_weights():
__A = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" ,)
model.tie_weights()
__A = infer_auto_device_map(A )
__A = "cpu"
__A = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" ,device_map=A ,load_in_abit=A ,llm_inta_enable_fpaa_cpu_offload=A )
# This should not work and get value error
with self.assertRaises(A ):
__A = accelerator.prepare(A )
@slow
@require_bnb
@require_multi_gpu
def UpperCamelCase_ ( self : int ):
from transformers import AutoModelForCausalLM
__A = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
__A = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" ,)
model.tie_weights()
__A = infer_auto_device_map(A )
__A = 1
__A = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" ,load_in_abit=A ,device_map=A ,)
__A = Accelerator()
# This should not work and get value error
with self.assertRaises(A ):
__A = accelerator.prepare(A )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def UpperCamelCase_ ( self : Dict ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
__A = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" ,)
__A = infer_auto_device_map(A )
__A = 1
__A = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" ,load_in_abit=A ,device_map=A ,)
__A = Accelerator()
# This should work
__A = accelerator.prepare(A )
@require_cuda
def UpperCamelCase_ ( self : Optional[Any] ):
__A = torch.nn.Linear(10 ,10 )
__A = torch.optim.SGD(model.parameters() ,lr=0.01 )
__A = Accelerator(cpu=A )
__A = accelerator.prepare(A )
| 55 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : str = ['''audio_values''', '''audio_mask''']
def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ):
'''simple docstring'''
super().__init__(
feature_size=A, sampling_rate=A, padding_value=A, **A, )
SCREAMING_SNAKE_CASE : str = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1]
SCREAMING_SNAKE_CASE : Dict = n_fft
SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate
SCREAMING_SNAKE_CASE : str = sampling_rate
SCREAMING_SNAKE_CASE : int = padding_value
SCREAMING_SNAKE_CASE : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = spectrogram(
A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1]
SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0
SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
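# with db_range=80.0 the log spectrogram lies in [max_db - 80, max_db]; subtracting 20,
# dividing by 40 with a clip to [-2, 0], then adding 1 normalizes it roughly into [-1, 1]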
return log_spec
def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A, np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa )
elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
SCREAMING_SNAKE_CASE : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
SCREAMING_SNAKE_CASE : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
SCREAMING_SNAKE_CASE : List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa )
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value
for i in range(len(A ) ):
SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = feature
# return as BatchFeature
if return_attention_mask:
SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features}
SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A )
return encoded_inputs
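The feature extractor above converts raw audio to normalized log-mel spectrogram patches. A standalone sketch of just the log-mel stage, assuming the `transformers` audio utilities are importable with these signatures; the constants mirror the defaults in the row above:

import numpy as np
from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

SAMPLING_RATE = 44_100
N_FFT = 2_048
HOP_LENGTH = SAMPLING_RATE // 86  # hop_length_to_sampling_rate = 86 above
NUM_MEL_BINS = 128

# Slaney-style mel filter bank, matching the constructor above.
mel_filters = mel_filter_bank(
    num_frequency_bins=1 + N_FFT // 2,
    num_mel_filters=NUM_MEL_BINS,
    min_frequency=0.0,
    max_frequency=22_050.0,
    sampling_rate=SAMPLING_RATE,
    norm="slaney",
    mel_scale="slaney",
)

def log_mel(waveform: np.ndarray) -> np.ndarray:
    # dB-scaled mel spectrogram, then the same normalization as the extractor:
    # subtract 20 dB, divide by 40, clip to [-2, 0], shift by +1.
    spec = spectrogram(
        waveform,
        window_function(N_FFT, "hann"),
        frame_length=N_FFT,
        hop_length=HOP_LENGTH,
        power=2.0,
        mel_filters=mel_filters,
        log_mel="dB",
        db_range=80.0,
    )
    spec = spec[:, :-1]  # drop the trailing frame, as above
    return np.clip((spec - 20.0) / 40.0, -2.0, 0.0) + 1.0

one_second = np.random.randn(SAMPLING_RATE).astype(np.float32)
print(log_mel(one_second).shape)  # (num_mel_bins, num_frames)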
| 28 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _a (lowercase__ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
"""simple docstring"""
__snake_case = []
if isinstance(lowercase__ , lowercase__ ):
for v in tree.values():
shapes.extend(_fetch_dims(lowercase__ ) )
elif isinstance(lowercase__ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowercase__ ) )
elif isinstance(lowercase__ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def _a (lowercase__ : int , lowercase__ : Tuple[int, ...] ) -> Tuple[int, ...]:
"""simple docstring"""
__snake_case = []
for d in reversed(lowercase__ ):
idx.append(flat_idx % d )
__snake_case = flat_idx // d
return tuple(reversed(lowercase__ ) )
@torch.jit.ignore
def _a (lowercase__ : Sequence[int] , lowercase__ : Sequence[int] , lowercase__ : Sequence[int] , lowercase__ : Optional[Sequence[bool]] = None , lowercase__ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
"""simple docstring"""
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowercase__ : List[bool] ) -> None:
__snake_case = True
for i in range(len(lowercase__ ) ):
__snake_case = -1 * (i + 1)
l[reversed_idx] &= tally
__snake_case = l[reversed_idx]
if start_edges is None:
__snake_case = [s == 0 for s in start]
reduce_edge_list(lowercase__ )
if end_edges is None:
__snake_case = [e == (d - 1) for e, d in zip(lowercase__ , lowercase__ )]
reduce_edge_list(lowercase__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowercase__ ) == 0:
return [()]
elif len(lowercase__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
__snake_case = []
__snake_case = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowercase__ , lowercase__ ):
if s == e:
path_list.append(slice(lowercase__ , s + 1 ) )
else:
break
__snake_case = tuple(lowercase__ )
__snake_case = len(lowercase__ )
# start == end, and we're done
if divergence_idx == len(lowercase__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__snake_case = start[divergence_idx]
return tuple(
path + (slice(lowercase__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__snake_case = end[divergence_idx]
return tuple(
path + (slice(lowercase__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__snake_case = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _a (lowercase__ : torch.Tensor , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> torch.Tensor:
"""simple docstring"""
__snake_case = t.shape[:no_batch_dims]
__snake_case = list(_flat_idx_to_idx(lowercase__ , lowercase__ ) )
# _get_minimal_slice_set is inclusive
__snake_case = list(_flat_idx_to_idx(flat_end - 1 , lowercase__ ) )
# Get an ordered list of slices to perform
__snake_case = _get_minimal_slice_set(
lowercase__ , lowercase__ , lowercase__ , )
__snake_case = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _a (lowercase__ : Callable , lowercase__ : Dict[str, Any] , lowercase__ : int , lowercase__ : int , lowercase__ : bool = False , lowercase__ : Any = None , lowercase__ : bool = False , ) -> Any:
"""simple docstring"""
if not (len(lowercase__ ) > 0):
raise ValueError('Must provide at least one input' )
__snake_case = [shape[:no_batch_dims] for shape in _fetch_dims(lowercase__ )]
__snake_case = tuple([max(lowercase__ ) for s in zip(*lowercase__ )] )
def _prep_inputs(lowercase__ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
__snake_case = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
__snake_case = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
__snake_case = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
__snake_case = tensor_tree_map(_prep_inputs , lowercase__ )
__snake_case = None
if _out is not None:
__snake_case = tensor_tree_map(lambda lowercase__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
__snake_case = 1
for d in orig_batch_dims:
flat_batch_dim *= d
__snake_case = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowercase__ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
__snake_case = 0
__snake_case = prepped_outputs
for _ in range(lowercase__ ):
# Chunk the input
if not low_mem:
__snake_case = _select_chunk
else:
__snake_case = partial(
_chunk_slice , flat_start=lowercase__ , flat_end=min(lowercase__ , i + chunk_size ) , no_batch_dims=len(lowercase__ ) , )
__snake_case = tensor_tree_map(lowercase__ , lowercase__ )
# Run the layer on the chunk
__snake_case = layer(**lowercase__ )
# Allocate space for the output
if out is None:
__snake_case = tensor_tree_map(lambda lowercase__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowercase__ )
# Put the chunk in its pre-allocated space
if isinstance(lowercase__ , lowercase__ ):
def assign(lowercase__ : dict , lowercase__ : dict ) -> None:
for k, v in da.items():
if isinstance(lowercase__ , lowercase__ ):
assign(lowercase__ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
__snake_case = da[k]
assign(lowercase__ , lowercase__ )
elif isinstance(lowercase__ , lowercase__ ):
for xa, xa in zip(lowercase__ , lowercase__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
__snake_case = xa
elif isinstance(lowercase__ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
__snake_case = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
__snake_case = tensor_tree_map(lambda lowercase__ : t.view(orig_batch_dims + t.shape[1:] ) , lowercase__ )
return out
class _lowercase :
def __init__( self : str , SCREAMING_SNAKE_CASE_ : int = 512 , ) -> Union[str, Any]:
__snake_case = max_chunk_size
__snake_case = None
__snake_case = None
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Callable , SCREAMING_SNAKE_CASE_ : tuple , SCREAMING_SNAKE_CASE_ : int ) -> int:
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
__snake_case = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
__snake_case = [c for c in candidates if c > min_chunk_size]
__snake_case = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(SCREAMING_SNAKE_CASE_ : int ) -> bool:
try:
with torch.no_grad():
fn(*SCREAMING_SNAKE_CASE_ , chunk_size=SCREAMING_SNAKE_CASE_ )
return True
except RuntimeError:
return False
__snake_case = 0
__snake_case = len(SCREAMING_SNAKE_CASE_ ) - 1
while i > min_viable_chunk_size_index:
__snake_case = test_chunk_size(candidates[i] )
if not viable:
__snake_case = (min_viable_chunk_size_index + i) // 2
else:
__snake_case = i
__snake_case = (i + len(SCREAMING_SNAKE_CASE_ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Iterable , SCREAMING_SNAKE_CASE_ : Iterable ) -> bool:
__snake_case = True
for aa, aa in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert type(SCREAMING_SNAKE_CASE_ ) == type(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
consistent &= self._compare_arg_caches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = [v for _, v in sorted(aa.items() , key=lambda SCREAMING_SNAKE_CASE_ : x[0] )]
__snake_case = [v for _, v in sorted(aa.items() , key=lambda SCREAMING_SNAKE_CASE_ : x[0] )]
consistent &= self._compare_arg_caches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
consistent &= aa == aa
return consistent
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Callable , SCREAMING_SNAKE_CASE_ : tuple , SCREAMING_SNAKE_CASE_ : int , ) -> int:
__snake_case = True
__snake_case = tree_map(lambda SCREAMING_SNAKE_CASE_ : a.shape if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) else a , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(SCREAMING_SNAKE_CASE_ )
__snake_case = self._compare_arg_caches(self.cached_arg_data , SCREAMING_SNAKE_CASE_ )
else:
# Otherwise, we can reuse the precomputed value
__snake_case = False
if not consistent:
__snake_case = self._determine_favorable_chunk_size(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
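The chunking utilities above run a layer over a large flattened batch in memory-bounded pieces, writing each result into a pre-allocated output. A toy illustration of the core idea, independent of the helpers above (a sketch of the principle, not the exact `chunk_layer` semantics):

import torch

def chunked_apply(layer, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # Run `layer` over `x` along dim 0 in slices of `chunk_size`,
    # pre-allocating the output so peak memory stays bounded.
    out = None
    for start in range(0, x.shape[0], chunk_size):
        piece = layer(x[start : start + chunk_size])
        if out is None:
            out = x.new_zeros((x.shape[0],) + tuple(piece.shape[1:]))
        out[start : start + piece.shape[0]] = piece
    return out

layer = torch.nn.Linear(8, 4)
x = torch.randn(1_000, 8)
with torch.no_grad():
    full = layer(x)
    chunked = chunked_apply(layer, x, chunk_size=128)
print(torch.allclose(full, chunked))  # True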
| 56 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = 9, 14 # noqa: F841
SCREAMING_SNAKE_CASE : Optional[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(__UpperCamelCase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
SCREAMING_SNAKE_CASE : Dict = mst(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
SCREAMING_SNAKE_CASE : Any = tuple(answer[:2] )
SCREAMING_SNAKE_CASE : List[Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
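For reference, a compact standalone Prim's algorithm over the same adjacency-list shape the test builds (a sketch; the imported `prisms_algorithm` may differ in details such as output ordering):

import heapq
from collections import defaultdict

def prim_mst(adjacency: dict) -> list:
    # Grow the tree from an arbitrary start node, always taking the cheapest
    # edge that reaches a new node; returns edges as [u, v, cost].
    start = next(iter(adjacency))
    visited = {start}
    heap = [(cost, start, v) for v, cost in adjacency[start]]
    heapq.heapify(heap)
    tree = []
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        tree.append([u, v, cost])
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return tree

adj = defaultdict(list)
for u, v, c in [[0, 1, 4], [0, 7, 8], [1, 7, 11], [7, 6, 1]]:
    adj[u].append([v, c])
    adj[v].append([u, c])
print(prim_mst(adj))  # [[0, 1, 4], [0, 7, 8], [7, 6, 1]]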
| 28 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
A_ : Tuple = 'scheduler_config.json'
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Any =1
a : Union[str, Any] =2
a : List[str] =3
a : List[str] =4
a : Dict =5
a : List[str] =6
a : str =7
a : int =8
a : Dict =9
a : int =10
a : int =11
a : int =12
a : str =13
a : List[str] =14
@dataclass
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : torch.FloatTensor
class _lowerCAmelCase:
"""simple docstring"""
a : Any =SCHEDULER_CONFIG_NAME
a : Optional[int] =[]
a : int =True
@classmethod
def _a ( cls , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase=False , **_lowerCamelCase , ):
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Any = cls.load_config(
pretrained_model_name_or_path=_lowerCamelCase , subfolder=_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , return_commit_hash=_lowerCamelCase , **_lowerCamelCase , )
return cls.from_config(_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase = False , **_lowerCamelCase ):
self.save_config(save_directory=_lowerCamelCase , push_to_hub=_lowerCamelCase , **_lowerCamelCase )
@property
def _a ( self ):
return self._get_compatibles()
@classmethod
def _a ( cls ):
UpperCamelCase_: Any = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase_: int = importlib.import_module(__name__.split('.' )[0] )
UpperCamelCase_: str = [
getattr(_lowerCamelCase , _lowerCamelCase ) for c in compatible_classes_str if hasattr(_lowerCamelCase , _lowerCamelCase )
]
return compatible_classes
| 57 |
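The `_get_compatibles` classmethod in the scheduler row above resolves a list of class names to actual classes in the importing module, silently skipping any that are not defined. A minimal self-contained sketch of that lookup pattern (all class names here are hypothetical stand-ins):

import importlib

class SchedulerBase:
    _compatibles = ["DDIMScheduler", "PNDMScheduler"]  # hypothetical names

    @classmethod
    def compatibles(cls):
        # Resolve class names to classes in this module, skipping names
        # that are not importable -- the same pattern as _get_compatibles.
        module = importlib.import_module(__name__)
        names = sorted(set([cls.__name__] + cls._compatibles))
        return [getattr(module, n) for n in names if hasattr(module, n)]

class DDIMScheduler(SchedulerBase):
    pass

print([c.__name__ for c in SchedulerBase.compatibles()])
# ['DDIMScheduler', 'SchedulerBase'] -- PNDMScheduler is skipped (undefined)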
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : int = StableDiffusionDiffEditPipeline
A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A : str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : Union[str, Any] = frozenset([] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears'
SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents
SCREAMING_SNAKE_CASE : str = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
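For context, the slow tests above exercise a three-stage DiffEdit call sequence: mask generation, DDIM inversion, then masked regeneration. Written out as a standalone sketch (requires a GPU and a model download; prompts and the output path are only illustrative):

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))

# 1) Find the region that differs between the two prompts.
mask = pipe.generate_mask(
    image=image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears"
)
# 2) Invert the image into latents conditioned on the source prompt.
latents = pipe.invert(prompt="a bowl of fruit", image=image, inpaint_strength=0.7).latents
# 3) Regenerate only the masked region under the target prompt.
result = pipe(
    prompt="a bowl of pears", mask_image=mask, image_latents=latents,
    negative_prompt="a bowl of fruit", inpaint_strength=0.7,
).images[0]
result.save("pears.png")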
| 28 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=False , _lowercase=True , _lowercase=False , _lowercase=False , _lowercase=1_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> Dict:
'''simple docstring'''
snake_case_ : Any = parent
snake_case_ : str = batch_size
snake_case_ : Dict = seq_length
snake_case_ : List[str] = is_training
snake_case_ : Tuple = use_input_mask
snake_case_ : str = use_token_type_ids
snake_case_ : Any = use_labels
snake_case_ : Any = vocab_size
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : List[str] = hidden_act
snake_case_ : Any = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : Optional[Any] = type_vocab_size
snake_case_ : List[str] = type_sequence_label_size
snake_case_ : str = initializer_range
snake_case_ : List[str] = num_labels
snake_case_ : Optional[Any] = num_choices
snake_case_ : str = scope
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : int = None
if self.use_input_mask:
snake_case_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[Any] = None
snake_case_ : Optional[Any] = None
snake_case_ : Optional[int] = None
if self.use_labels:
snake_case_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : str = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Dict = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_lowercase , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , )
return config
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = EsmForProteinFolding(config=_lowercase ).float()
model.to(_lowercase )
model.eval()
snake_case_ : Tuple = model(_lowercase , attention_mask=_lowercase )
snake_case_ : int = model(_lowercase )
snake_case_ : Any = model(_lowercase )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = config_and_inputs
snake_case_ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = False
_lowerCamelCase = (EsmForProteinFolding,) if is_torch_available() else ()
_lowerCamelCase = ()
_lowerCamelCase = {} if is_torch_available() else {}
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : str = EsmFoldModelTester(self )
snake_case_ : Union[str, Any] = ConfigTester(self , config_class=_lowercase , hidden_size=3_7 )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip("""Does not support attention outputs""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold only has one output format.""" )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
@require_torch
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
model.eval()
snake_case_ : Any = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
snake_case_ : Tuple = model(_lowercase )["""positions"""]
snake_case_ : Any = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _lowercase , atol=1E-4 ) )
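A standalone sketch of the same ESMFold inference path the integration test above runs (slow: it downloads the full checkpoint; the token IDs are the short tokenized protein sequence from the test):

import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding

model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
model.eval()

# The same short token sequence the test feeds in.
input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
with torch.no_grad():
    positions = model(input_ids)["positions"]
# Per the shape checks above: (num_recycles, batch, seq_len, 14 atoms, xyz).
print(positions.shape)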
| 58 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int = 1_00_00_00 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,__UpperCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
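The function above is a sieve over Euler's totient: it answers Project Euler 72 by summing phi(d) for 2 <= d <= limit, which counts reduced proper fractions with denominator at most `limit`. A quick brute-force sanity check on a small bound:

from math import gcd

def brute_force(limit: int) -> int:
    # Count reduced proper fractions n/d with 0 < n < d <= limit directly.
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)

assert brute_force(8) == 21  # phi(2) + ... + phi(8) = 1+2+2+4+2+6+4 = 21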
| 28 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "roc_bert"
def __init__(self : Dict , UpperCAmelCase_ : Optional[int]=30_522 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Union[str, Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Tuple=1E-1_2 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : int="absolute" , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=768 , UpperCAmelCase_ : int=910 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : Any=24_858 , UpperCAmelCase_ : List[str]=True , **UpperCAmelCase_ : str , ) ->int:
'''simple docstring'''
lowerCamelCase__: List[Any] =vocab_size
lowerCamelCase__: Any =max_position_embeddings
lowerCamelCase__: str =hidden_size
lowerCamelCase__: int =num_hidden_layers
lowerCamelCase__: Optional[int] =num_attention_heads
lowerCamelCase__: Optional[Any] =intermediate_size
lowerCamelCase__: int =hidden_act
lowerCamelCase__: Optional[Any] =hidden_dropout_prob
lowerCamelCase__: Optional[Any] =attention_probs_dropout_prob
lowerCamelCase__: List[Any] =initializer_range
lowerCamelCase__: Tuple =type_vocab_size
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: Tuple =use_cache
lowerCamelCase__: int =enable_pronunciation
lowerCamelCase__: str =enable_shape
lowerCamelCase__: List[Any] =pronunciation_embed_dim
lowerCamelCase__: int =pronunciation_vocab_size
lowerCamelCase__: Union[str, Any] =shape_embed_dim
lowerCamelCase__: Union[str, Any] =shape_vocab_size
lowerCamelCase__: int =concat_input
lowerCamelCase__: str =position_embedding_type
lowerCamelCase__: str =classifier_dropout
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
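A minimal usage sketch for the config class above, assuming a `transformers` version that ships RoCBert; the override below just surfaces the extra pronunciation/shape embedding knobs this config adds over plain BERT:

from transformers import RoCBertConfig, RoCBertModel

config = RoCBertConfig(enable_pronunciation=True, enable_shape=True)
model = RoCBertModel(config)  # randomly initialized, no download
print(config.pronunciation_embed_dim, config.shape_embed_dim)  # 768 512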
| 59 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = LongformerTokenizer
A : List[str] = True
A : Optional[int] = LongformerTokenizerFast
A : Tuple = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
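The toy vocabulary in the `setUp` above drives byte-level BPE, where "\u0120" marks a leading space. A standalone walk-through of the merge process using the same merges, independent of `transformers` (simplified: real BPE repeatedly merges the highest-priority adjacent pair, but the result is the same here):

# Merges from setUp: "\u0120 l", "\u0120l o", "\u0120lo w", "e r".
MERGES = [("\u0120", "l"), ("\u0120l", "o"), ("\u0120lo", "w"), ("e", "r")]

def bpe(word: str) -> list:
    symbols = list(word)
    for a, b in MERGES:  # apply merges in priority order (simplified)
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols

print(bpe("\u0120lower"))  # ['\u0120low', 'er'], matching the test's tokens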
| 28 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
lowerCAmelCase_ = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
lowerCAmelCase_ = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
lowerCAmelCase_ = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
lowerCAmelCase_ = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
lowerCAmelCase_ = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
for tf_name, hf_name in patterns:
snake_case_ : Optional[int] = k.replace(_UpperCamelCase , _UpperCamelCase )
return k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> BigBirdPegasusForConditionalGeneration:
"""simple docstring"""
snake_case_ : Tuple = BigBirdPegasusConfig(**_UpperCamelCase )
snake_case_ : List[str] = BigBirdPegasusForConditionalGeneration(_UpperCamelCase )
snake_case_ : Tuple = torch_model.state_dict()
snake_case_ : Tuple = {}
# separating decoder weights
snake_case_ : Dict = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
snake_case_ : Dict = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
snake_case_ : Optional[int] = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE]
if any(_UpperCamelCase ):
continue
snake_case_ : str = DECODER_PATTERNS
snake_case_ : Optional[int] = rename_state_dict_key(_UpperCamelCase , _UpperCamelCase )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
snake_case_ : List[str] = v.T
snake_case_ : Dict = torch.from_numpy(_UpperCamelCase )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
snake_case_ : Union[str, Any] = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE]
if any(_UpperCamelCase ):
continue
snake_case_ : Optional[Any] = REMAINING_PATTERNS
snake_case_ : int = rename_state_dict_key(_UpperCamelCase , _UpperCamelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
snake_case_ : List[str] = v.T
snake_case_ : Dict = torch.from_numpy(_UpperCamelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
snake_case_ : Dict = mapping['''model.embed_positions.weight''']
snake_case_ : Tuple = mapping.pop('''model.embed_positions.weight''' )
snake_case_ , snake_case_ : Union[str, Any] = torch_model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
snake_case_ : Union[str, Any] = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def lowerCamelCase_ ( _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : str = tf.train.list_variables(_UpperCamelCase )
snake_case_ : List[str] = {}
snake_case_ : int = ['''global_step''']
for name, shape in tqdm(_UpperCamelCase , desc='''converting tf checkpoint to dict''' ):
snake_case_ : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case_ : Any = tf.train.load_variable(_UpperCamelCase , _UpperCamelCase )
snake_case_ : int = array
return tf_weights
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
snake_case_ : List[Any] = get_tf_weights_as_numpy(_UpperCamelCase )
snake_case_ : Optional[Any] = convert_bigbird_pegasus(_UpperCamelCase , _UpperCamelCase )
torch_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
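The key-renaming step above is pure, order-sensitive string substitution. A quick self-contained check of the behaviour with a subset of the decoder patterns (the example key is illustrative, not from a real checkpoint):

PATTERNS = [  # a subset of DECODER_PATTERNS above, applied in order
    ("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model"),
    ("attention.self", "self_attn"), ("query", "q_proj"),
]

def rename(key: str) -> str:
    # Mirrors rename_state_dict_key: each (tf_name, hf_name) pair in turn.
    for tf_name, hf_name in PATTERNS:
        key = key.replace(tf_name, hf_name)
    return key

print(rename("pegasus/decoder/layer_0/attention/self/query/kernel"))
# model.decoder.layers.0.self_attn.q_proj.weight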
| 60 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()

class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4), use_linear_projection=True,
            addition_embed_type="text_time", addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1,
            beta_schedule="scaled_linear", timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    # the common save/load-optional-components test is intentionally a no-op for this pipeline
    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that the two forward passes match
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
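
# The @slow GPU test above is opted into explicitly via the RUN_SLOW flag used by the
# diffusers test suite; the file path below is an assumption about where this module lives:
#
#   RUN_SLOW=1 pytest tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py -q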
| 28 | 0 |