| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 or 1) |
def rank_of_matrix(matrix: list[list[float]]) -> int:
    """Find the rank of a matrix via in-place Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
                # Stay on the same row so the shuffled-in column is processed
                row -= 1
        row += 1
    return rank
if __name__ == "__main__":
    import doctest
    doctest.testmod()
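# Minimal usage sketch for rank_of_matrix above (hypothetical inputs):
# >>> rank_of_matrix([[1, 2], [2, 4]])   # second row is twice the first
# 1
# >>> rank_of_matrix([[1, 0], [0, 1]])
# 2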
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: "Image") -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: "Image") -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs["masks"]):
            new_outputs += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_outputs, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs["masks"]):
            new_outputs += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_outputs, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
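# Example invocation (paths are illustrative placeholders, not real files):
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path /path/to/model.ckpt-0 \
#     --tapas_config_file /path/to/bert_config.json \
#     --pytorch_dump_path /path/to/tapas_wtq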
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def test_dataset_infos_dict_from_directory(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
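# Usage note (sketch): importing this package is cheap because the _LazyModule
# proxy defers the torch- and TF-backed submodule imports until an attribute
# such as SwinModel is first accessed.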
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
    @property
    def num_rows(self) -> int:
        return len(self.rows)
    @property
    def num_columns(self) -> int:
        return len(self.rows[0])
    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)
    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )
    def is_invertable(self) -> bool:
        return bool(self.determinant())
    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )
    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )
    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
    def __repr__(self) -> str:
        return str(self.rows)
    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n  ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError("Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError("Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError("Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows
    def __ne__(self, other: object) -> bool:
        return not self == other
    def __neg__(self) -> Matrix:
        return self * -1
    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )
    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )
    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix([[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError("A Matrix can only be multiplied by an int, float, or another matrix")
    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError("Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result
    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
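# Minimal usage sketch for the Matrix class above:
# >>> m = Matrix([[1, 2], [3, 4]])
# >>> m.determinant()
# -2
# >>> (m * m).rows
# [[7, 10], [15, 22]]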
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )
    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...
    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))
        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)
        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)
        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile
        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr
        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame
        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )
    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
SCREAMING_SNAKE_CASE = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCAmelCase__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
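# Example invocation (the URL is the argparse default; the dump path is illustrative):
# python convert_vit_msn_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#     --pytorch_dump_folder_path ./vit-msn-small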
"""simple docstring"""
from itertools import product
def lowerCamelCase__ ( __snake_case, __snake_case ) -> list[int]:
"""simple docstring"""
_UpperCamelCase = sides_number
_UpperCamelCase = max_face_number * dice_number
_UpperCamelCase = [0] * (max_total + 1)
_UpperCamelCase = 1
_UpperCamelCase = range(__snake_case, max_face_number + 1 )
for dice_numbers in product(__snake_case, repeat=__snake_case ):
_UpperCamelCase = sum(__snake_case )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCamelCase__ ( ) -> float:
"""simple docstring"""
_UpperCamelCase = total_frequency_distribution(
sides_number=4, dice_number=9 )
_UpperCamelCase = total_frequency_distribution(
sides_number=6, dice_number=6 )
_UpperCamelCase = 0
_UpperCamelCase = 9
_UpperCamelCase = 4 * 9
_UpperCamelCase = 6
for peter_total in range(__snake_case, max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCamelCase = (4**9) * (6**6)
_UpperCamelCase = peter_wins_count / total_games_number
_UpperCamelCase = round(__snake_case, ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = (DPMSolverSDEScheduler,)
lowercase__ = 10
def UpperCAmelCase ( self , **__a) -> int:
'''simple docstring'''
_UpperCamelCase = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__a)
return config
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=__a , beta_end=__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''')
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a , use_karras_sigmas=__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
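# Note: the per-device branches above encode backend-specific reference values for
# mps, cuda and cpu; the loose tolerances (1e-2 on sums, 1e-3 on most means) absorb
# the remaining floating-point differences between kernels.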
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Heuristic: Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    """Greedy best-first search: always expands the open node whose heuristic
    value (not heuristic + path cost, as in A*) is smallest."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid (in-bounds, non-obstacle) neighbour nodes."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from the given node back to the start via parents."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
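

# A compact smoke check (not part of the original module): on the module-level
# grid above, the path returned by greedy best-first starts at the start cell
# and ends at the goal cell.
def _demo_search() -> None:  # illustrative helper
    found_path = GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1)).search()
    assert found_path is not None
    assert found_path[0] == (0, 0)
    assert found_path[-1] == (len(grid) - 1, len(grid[0]) - 1)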
if __name__ == "__main__":
UpperCamelCase_ = (0, 0)
UpperCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
UpperCamelCase_ = GreedyBestFirst(init, goal)
UpperCamelCase_ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
UpperCamelCase_ = 2
for elem in grid:
print(elem)
| 376 | 1 |
def join(separator: str, separated: list) -> str:
    """
    Joins a list of strings with a separator, mirroring str.join.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'
    >>> join("#", ["a", "b", "c", 1])
    Traceback (most recent call last):
        ...
    Exception: join() accepts only strings to be joined
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod() | 397 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types, i.e. the first three are N, CA, C.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the residue types alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a `Protein` from a model prediction."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
 | 397 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 718 |
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
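

# A minimal round-trip sketch (not in the original module): with the 2x2 key
# [[2, 5], [1, 6]] (determinant 7, coprime with 36, hence a valid key),
# decrypt(encrypt(x)) recovers the processed plaintext, i.e. uppercased,
# filtered to A-Z0-9 and padded with its last character to a multiple of the
# key order.
def _demo_round_trip() -> None:  # illustrative helper
    cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
    plain = cipher.process_text("Hello, World!")  # -> "HELLOWORLD"
    assert cipher.decrypt(cipher.encrypt(plain)) == plain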
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
 | 535 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ErnieM tokenizer, based on SentencePiece."""

    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
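
    # Worked example (illustrative, not from the original source): with
    # token_ids_0 = [10, 11] and token_ids_1 = [20], the methods above produce
    #   input ids:       [CLS] 10 11 [SEP] [SEP] 20 [SEP]
    #   token type ids:    0    0  0   1     1    1   1
    # i.e. everything from the first [SEP] of the pair onward is segment 1.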
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)

        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
 | 526 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
 | 526 | 1 |
from ..utils import DummyObject, requires_backends


# Dummy placeholder raised when the optional backends are missing; the class
# name `MidiProcessor` is reconstructed from the diffusers dummy-object file
# this snippet appears to come from.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 154 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
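

# A minimal usage sketch (not part of the original file); the image path is
# illustrative and the checkpoint is downloaded on first use.
if __name__ == "__main__":
    from PIL import Image

    tool = ImageQuestionAnsweringTool()
    image = Image.open("photo.jpg")  # hypothetical local file
    print(tool(image, "How many cats are in the picture?"))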
| 154 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 430 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 430 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
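

# A sketch (not part of the test suite) of the memory mechanism these tests
# check: hidden states returned as `mems` are fed back so the next chunk can
# attend to up to `mem_len` previous tokens. The checkpoint name is
# illustrative.
def _demo_transfo_xl_memory():  # hypothetical helper
    model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
    ids = tf.random.uniform((1, 8), maxval=model.config.vocab_size, dtype=tf.int32)
    mems = None
    for chunk in tf.split(ids, 2, axis=1):  # two chunks of 4 tokens each
        outputs = model(chunk, mems=mems)
        mems = outputs.mems  # reuse as context for the next chunk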
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
SCREAMING_SNAKE_CASE : Optional[Any] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
SCREAMING_SNAKE_CASE : Tuple = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
SCREAMING_SNAKE_CASE : Optional[int] = model.generate(_lowerCamelCase , max_length=200 , do_sample=_lowerCamelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCamelCase )
| 333 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in range(self.batch_size ):
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.rand(self.n_targets , 4 , device=_lowerCamelCase )
labels.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) ->int:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len]
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len]
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size]
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
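# Shape notes for the assertions above: hustvl/yolos-small predicts 100 detection
# tokens per image, and its logits carry num_labels + 1 entries (the 91 COCO
# category ids plus one extra "no object" class), which gives the (1, 100, 92)
# shape. `post_process_object_detection` then filters detections by score and
# rescales the normalized (cx, cy, w, h) boxes to absolute (x0, y0, x1, y1)
# pixel coordinates using the supplied target sizes.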
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set the given objects to `None` and empty the accelerator cache to free memory."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether `exception` is one of the out-of-memory errors that warrant retrying with a smaller batch."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that retries `function` with a halved batch size whenever an out-of-memory error is raised."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
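# A minimal usage sketch for the decorator above. `train_one_epoch` and
# `model_name` are hypothetical stand-ins; the only contract the decorator
# imposes is that the wrapped function takes the batch size as its first
# positional argument and raises an OOM-style RuntimeError when it is too big.
#
# @find_executable_batch_size(starting_batch_size=256)
# def train_one_epoch(batch_size, model_name):
#     print(f"Trying batch_size={batch_size} for {model_name}")
#     ...  # build dataloaders with `batch_size` and run one epoch
#
# train_one_epoch("my-model")  # starts at 256 and halves on every OOM failure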
def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """Return the given decimal number as a (numerator, denominator) pair in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce by the greatest common divisor, found with Euclid's algorithm
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
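# The reduction loop above is Euclid's algorithm; a standard-library cross-check
# (values worked out by hand) would be:
#
# from math import gcd
# g = gcd(625, 100)                       # 6.25 -> 625/100, so g == 25
# assert (625 // g, 100 // g) == (25, 4)  # the pair decimal_to_fraction(6.25) returns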
if __name__ == "__main__":
print(F'{decimal_to_fraction(2) = }')
print(F'{decimal_to_fraction(89.0) = }')
print(F'{decimal_to_fraction("67") = }')
print(F'{decimal_to_fraction("45.0") = }')
print(F'{decimal_to_fraction(1.5) = }')
print(F'{decimal_to_fraction("6.25") = }')
print(F'{decimal_to_fraction("78td") = }')
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
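# A minimal follow-up sketch: turning the masked-LM logits above into predicted
# token ids. `jnp.argmax` over the vocabulary axis picks the model's top token
# per position (tokenizer decoding is omitted here).
#
# predicted_ids = jnp.argmax(output, axis=-1)  # shape (1, 6)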
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
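# A minimal sketch of the RoPE-scaling configuration exercised by the test above,
# assuming OpenLlamaConfig accepts the same `rope_scaling` dict as LlamaConfig:
#
# config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
# model = OpenLlamaModel(config)  # position indices are interpolated by 1/factor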
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for an incoming sample x[n]."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the frequency response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
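# A minimal usage sketch: any object with a `process(sample) -> float` method
# satisfies the FilterType protocol. The pass-through filter below is a made-up
# example whose response is a flat 0 dB gain with zero phase shift.
#
# class PassThrough:
#     def process(self, sample: float) -> float:
#         return sample
#
# show_frequency_response(PassThrough(), samplerate=48_000)
# show_phase_response(PassThrough(), samplerate=48_000)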
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
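# A minimal usage sketch (the keyword values are illustrative overrides of the
# defaults above):
#
# config = IBertConfig(quant_mode=True, force_dequant="gelu")
# assert config.model_type == "ibert" and config.quant_mode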
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163], do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []  # (tf name, hf name) pairs; the local list intentionally shadows the function name
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
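# Example invocation of this conversion script (the filename is illustrative;
# the flags match the argparse definitions above):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model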
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below `n`."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"{solution() = }")
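# The brute-force sum above is O(n); a closed-form O(1) sketch using
# inclusion-exclusion over arithmetic series gives the same answer:
#
# def solution_closed_form(n: int = 1000) -> int:
#     def series_sum(k: int) -> int:  # sum of the multiples of k below n
#         m = (n - 1) // k
#         return k * m * (m + 1) // 2
#     return series_sum(3) + series_sum(5) - series_sum(15)
#
# assert solution_closed_form() == solution() == 233168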
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
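# A minimal usage sketch (assumes `sentencepiece` is installed so the lazy
# import resolves; the checkpoint name is illustrative):
#
# from transformers.models.gpt_sw3 import GPTSw3Tokenizer
# tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")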
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between `v0` and `v1` (numpy arrays or torch tensors)."""
    inputs_are_torch = False  # initialize so plain numpy inputs do not hit a NameError below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # the vectors are nearly parallel, so plain linear interpolation is stable enough
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
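# A quick numeric sanity check for `slerp` on plain numpy inputs (values chosen
# by hand): halfway between two orthogonal unit vectors stays on the unit circle.
#
# a = np.array([1.0, 0.0])
# b = np.array([0.0, 1.0])
# mid = slerp(0.5, a, b)            # -> array([0.7071..., 0.7071...])
# assert np.isclose(np.linalg.norm(mid), 1.0)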
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPFeatureExtractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
def __call__( self : List[Any] , __UpperCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , __UpperCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Optional[int] = 5_1_2 , __UpperCamelCase : Optional[int] = 5_1_2 , __UpperCamelCase : float = 0.6 , __UpperCamelCase : Optional[int] = 5_0 , __UpperCamelCase : Optional[float] = 7.5 , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[float] = 1_0_0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : float = 0.8 , __UpperCamelCase : float = 0.1 , __UpperCamelCase : float = 0.1 , ):
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
            raise ValueError(F'''You have passed batch_size={batch_size}, but only {len(__UpperCamelCase )} generators; one generator per sample is required.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(__UpperCamelCase , torch.Generator ) and batch_size > 1:
lowerCamelCase_ = [generator] + [None] * (batch_size - 1)
lowerCamelCase_ = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
lowerCamelCase_ = [x[0] for x in coca_is_none if x[1]]
lowerCamelCase_ = """, """.join(__UpperCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__UpperCamelCase ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
lowerCamelCase_ = self.get_image_description(__UpperCamelCase )
if style_prompt is None:
if len(__UpperCamelCase ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
lowerCamelCase_ = self.get_image_description(__UpperCamelCase )
# get prompt text embeddings for content and style
lowerCamelCase_ = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="""pt""" , )
lowerCamelCase_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="""pt""" , )
lowerCamelCase_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = slerp(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# duplicate text embeddings for each generation per prompt
lowerCamelCase_ = text_embeddings.repeat_interleave(__UpperCamelCase , dim=0 )
# set timesteps
lowerCamelCase_ = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_offset:
lowerCamelCase_ = 1
self.scheduler.set_timesteps(__UpperCamelCase , **__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCamelCase_ , lowerCamelCase_ = self.get_timesteps(__UpperCamelCase , __UpperCamelCase , self.device )
lowerCamelCase_ = timesteps[:1].repeat(__UpperCamelCase )
# Preprocess image
lowerCamelCase_ = preprocess(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = self.prepare_latents(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text_embeddings.dtype , self.device , __UpperCamelCase )
lowerCamelCase_ = preprocess(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = self.prepare_latents(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text_embeddings.dtype , self.device , __UpperCamelCase )
lowerCamelCase_ = slerp(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if clip_guidance_scale > 0:
lowerCamelCase_ = self.get_clip_image_embeddings(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = self.get_clip_image_embeddings(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = slerp(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = content_text_input.input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer([""""""] , padding="""max_length""" , max_length=__UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase_ = uncond_embeddings.repeat_interleave(__UpperCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase_ = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
# check if the scheduler accepts generator
lowerCamelCase_ = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCamelCase_ = generator
with self.progress_bar(total=__UpperCamelCase ):
for i, t in enumerate(__UpperCamelCase ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
lowerCamelCase_ = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase_ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase_ , lowerCamelCase_ = self.cond_fn(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.18215 * latents
lowerCamelCase_ = self.vae.decode(__UpperCamelCase ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
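# A minimal usage sketch for the __call__ above. Everything here is
# illustrative: `pipe` is assumed to be an instance of this pipeline, the file
# names are placeholders, and the keyword names follow the variables referenced
# in the body (the actual signature may name or order them differently):
#
#     import torch
#     from PIL import Image
#
#     image_a = Image.open("style.png").convert("RGB")
#     image_b = Image.open("content.png").convert("RGB")
#     result = pipe(
#         image_a,
#         image_b,
#         clip_guidance_scale=100,   # 0 skips the CLIP-guided cond_fn branch
#         generator=torch.Generator("cuda").manual_seed(0),
#     )
#     result.images[0].save("mixed.png")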
| 103 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
lowercase = {'''mobilebert-uncased''': 5_1_2}
lowercase = {}
class __A( UpperCAmelCase ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = MobileBertTokenizer
def __init__( self : Union[str, Any] , __UpperCamelCase : str=None , __UpperCamelCase : str=None , __UpperCamelCase : Dict=True , __UpperCamelCase : Any="[UNK]" , __UpperCamelCase : str="[SEP]" , __UpperCamelCase : Dict="[PAD]" , __UpperCamelCase : List[str]="[CLS]" , __UpperCamelCase : Any="[MASK]" , __UpperCamelCase : Any=True , __UpperCamelCase : int=None , **__UpperCamelCase : Dict , ):
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , tokenize_chinese_chars=__UpperCamelCase , strip_accents=__UpperCamelCase , **__UpperCamelCase , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __UpperCamelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __UpperCamelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __UpperCamelCase ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(__UpperCamelCase , normalizer_state.pop("""type""" ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**__UpperCamelCase )
lowerCamelCase_ = do_lower_case
    def lowercase__ ( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        # [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b is not None:
            output += token_ids_b + [self.sep_token_id]
        return output
    def lowercase__ ( self : Tuple , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        # token type ids: 0 for the first sequence (plus specials), 1 for the second
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def lowercase__ ( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
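# A minimal usage sketch, assuming the class above is exported as
# MobileBertTokenizerFast (checkpoint name taken from the pretrained map above):
#
#     tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#     enc = tok("hello world", return_tensors="pt")
#     # enc["input_ids"] starts with [CLS] and ends with [SEP], matching
#     # build_inputs_with_special_tokens above.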
| 103 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
SCREAMING_SNAKE_CASE__ = (low + high) // 2
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = max_subarray(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = max_subarray(snake_case__ , mid + 1 , snake_case__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = max_cross_sum(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
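# Worked example (values chosen for illustration): for
# arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4], the call max_subarray(arr, 0, 8)
# returns (3, 6, 6): the best contiguous slice is arr[3:7] == [4, -1, 2, 1],
# whose sum is 6 (found here via the cross-sum branch around mid == 4).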
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = float("""-inf""" ), -1
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = float("""-inf""" ), -1
SCREAMING_SNAKE_CASE__ = 0
for i in range(snake_case__ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
SCREAMING_SNAKE_CASE__ = summ
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
SCREAMING_SNAKE_CASE__ = summ
SCREAMING_SNAKE_CASE__ = i
return max_left, max_right, (left_sum + right_sum)
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [randint(1 , snake_case__ ) for _ in range(snake_case__ )]
SCREAMING_SNAKE_CASE__ = time.time()
max_subarray(snake_case__ , 0 , input_size - 1 )
SCREAMING_SNAKE_CASE__ = time.time()
return end - start
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
SCREAMING_SNAKE_CASE__ = [time_max_subarray(snake_case__ ) for input_size in input_sizes]
print("""No of Inputs\t\tTime Taken""" )
for input_size, runtime in zip(snake_case__ , snake_case__ ):
print(snake_case__ , """\t\t""" , snake_case__ )
plt.plot(snake_case__ , snake_case__ )
plt.xlabel("""Number of Inputs""" )
plt.ylabel("""Time taken in seconds""" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 196 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCamelCase (A__ ,unittest.TestCase ):
lowerCamelCase__ : Union[str, Any] = BertJapaneseTokenizer
lowerCamelCase__ : str = False
lowerCamelCase__ : Optional[int] = True
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
super().setUp()
SCREAMING_SNAKE_CASE__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = """こんにちは、世界。 \nこんばんは、世界。"""
SCREAMING_SNAKE_CASE__ = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_input_output_texts(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
return text, ids
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(__UpperCAmelCase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = """こんにちは、世界。\nこんばんは、世界。"""
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(__UpperCAmelCase , """wb""" ) as handle:
pickle.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(__UpperCAmelCase , """rb""" ) as handle:
SCREAMING_SNAKE_CASE__ = pickle.load(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer_new.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
try:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
try:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(do_lower_case=__UpperCAmelCase , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
try:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(
do_lower_case=__UpperCAmelCase , normalize_text=__UpperCAmelCase , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(normalize_text=__UpperCAmelCase , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = """こんにちは、世界。\nこんばんは、世界。"""
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(__UpperCAmelCase , """wb""" ) as handle:
pickle.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(__UpperCAmelCase , """rb""" ) as handle:
SCREAMING_SNAKE_CASE__ = pickle.load(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer_new.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(do_lower_case=__UpperCAmelCase , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(normalize_text=__UpperCAmelCase , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(trim_whitespace=__UpperCAmelCase , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = """こんにちは、世界。\nこんばんは、世界。"""
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(__UpperCAmelCase , """wb""" ) as handle:
pickle.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(__UpperCAmelCase , """rb""" ) as handle:
SCREAMING_SNAKE_CASE__ = pickle.load(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer_new.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = JumanppTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = JumanppTokenizer(normalize_text=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = JumanppTokenizer(trim_whitespace=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
SCREAMING_SNAKE_CASE__ = {}
for i, token in enumerate(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = WordpieceTokenizer(vocab=__UpperCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
SCREAMING_SNAKE_CASE__ = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE__ = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(__UpperCAmelCase , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
SCREAMING_SNAKE_CASE__ = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(__UpperCAmelCase , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("""ありがとう。""" , add_special_tokens=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("""どういたしまして。""" , add_special_tokens=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase (A__ ,unittest.TestCase ):
lowerCamelCase__ : Dict = BertJapaneseTokenizer
lowerCamelCase__ : Tuple = False
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
super().setUp()
SCREAMING_SNAKE_CASE__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , **__UpperCAmelCase : List[Any] ) -> List[Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = """こんにちは、世界。 \nこんばんは、世界。"""
SCREAMING_SNAKE_CASE__ = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
__UpperCAmelCase , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
SCREAMING_SNAKE_CASE__ = {}
for i, token in enumerate(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = CharacterTokenizer(vocab=__UpperCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("""ありがとう。""" , add_special_tokens=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("""どういたしまして。""" , add_special_tokens=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ = """cl-tohoku/bert-base-japanese"""
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
class lowerCamelCase (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(__UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
SCREAMING_SNAKE_CASE__ = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(__UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 196 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
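# With _LazyModule, the objects listed in _import_structure are only imported
# on first attribute access; e.g. (sketch, assuming this file is the
# mask2former package __init__):
#
#     from transformers.models.mask2former import Mask2FormerConfig
#     # ^ resolves configuration_mask2former lazily, at this line, rather than
#     #   at `import transformers` time.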
| 51 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
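# Example mappings produced by rename_keys (sketch):
#   "emb.0.weight"                        -> "model.decoder.embed_tokens.0.weight"
#   "transformer.layers.0.linear1.weight" -> "model.decoder.layers.0.fc1.weight"
#   "transformer.layers.0.norm_cross.weight"
#                                         -> "model.decoder.layers.0.encoder_attn_layer_norm.weight"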
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
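# e.g. (from the branches above): decoder_config_from_checkpoint("small")
# yields hidden_size=1024, 24 hidden layers, 16 attention heads, and
# ffn_dim = 4 * 1024 = 4096.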
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
    _UpperCamelCase : Tuple = T5EncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
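# Example invocation from a shell (the script file name is a placeholder):
#   python convert_musicgen_checkpoint.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu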
| 51 | 1 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=14 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_labels
lowerCamelCase_ = use_mc_token_ids
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = self.vocab_size - 1
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
if self.use_mc_token_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case ( self ):
"""simple docstring"""
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , *UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = CTRLModel(config=_snake_case )
model.to(_snake_case )
model.eval()
model(_snake_case , token_type_ids=_snake_case , head_mask=_snake_case )
model(_snake_case , token_type_ids=_snake_case )
lowerCamelCase_ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , *UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = CTRLLMHeadModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCamelCase_ = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config, input_ids, input_mask, head_mask, token_type_ids,
            mc_token_ids, sequence_labels, token_labels, choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , *UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = CTRLForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class snake_case ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
_lowerCamelCase = (CTRLLMHeadModel,) if is_torch_available() else ()
_lowerCamelCase = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = CTRLModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=_snake_case , n_embd=37 )
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_snake_case )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self ):
"""simple docstring"""
pass
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = CTRLModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def snake_case ( self ):
"""simple docstring"""
pass
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = CTRLLMHeadModel.from_pretrained("ctrl" )
model.to(_snake_case )
lowerCamelCase_ = torch.tensor(
[[1_1859, 0, 1611, 8]] , dtype=torch.long , device=_snake_case ) # Legal the president is
lowerCamelCase_ = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowerCamelCase_ = model.generate(_snake_case , do_sample=_snake_case )
self.assertListEqual(output_ids[0].tolist() , _snake_case )
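# The generation check above is gated behind @slow; it only runs when slow
# tests are enabled, e.g. (path is a placeholder):
#   RUN_SLOW=1 pytest -q tests/models/ctrl/test_modeling_ctrl.py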
 | 675 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def UpperCamelCase_ ( file , sock ):
    """simple docstring"""
    # ===== initialization =====
    # the innermost @patch ("builtins.open") supplies the first mock argument
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    chunks = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(chunks )
    # ===== invoke =====
    send_file(filename="mytext.txt" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
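# The iterator wired into read() above yields one truthy chunk and then None,
# so send_file's transmit loop runs exactly once before the sockets close,
# which is what the single-call assertions verify.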
| 424 | 0 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCAmelCase_ : Union[str, Any] = """src/diffusers"""
# Matches is_xxx_available()
UpperCAmelCase_ : int = re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
UpperCAmelCase_ : List[str] = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
UpperCAmelCase_ : List[Any] = """
{0} = None
"""
UpperCAmelCase_ : Tuple = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""
UpperCAmelCase_ : Optional[int] = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def _lowerCAmelCase ( _a : str ):
    # collect every backend referenced via is_xxx_available() on this line
    backends = _re_backend.findall(_a )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def _lowerCAmelCase ( ) -> Optional[int]:
with open(os.path.join(UpperCAmelCase__ , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCAmelCase_ : List[Any] = f.readlines()
# Get to the point we do the actual imports for type checking
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : str = {}
# Go through the end of the file
while line_index < len(UpperCAmelCase__ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCAmelCase_ : str = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
lowerCAmelCase_ : str = []
# Until we unindent, add backend objects to the list
while line_index < len(UpperCAmelCase__ ) and len(lines[line_index] ) > 1:
lowerCAmelCase_ : Tuple = lines[line_index]
lowerCAmelCase_ : str = _re_single_line_import.search(UpperCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(UpperCAmelCase__ ) > 0:
lowerCAmelCase_ : Tuple = objects
else:
line_index += 1
return backend_specific_objects
def _lowerCAmelCase ( _a : List[str] , _a : List[str] ) -> List[Any]:
if name.isupper():
return DUMMY_CONSTANT.format(UpperCAmelCase__ )
elif name.islower():
return DUMMY_FUNCTION.format(UpperCAmelCase__ , UpperCAmelCase__ )
else:
return DUMMY_CLASS.format(UpperCAmelCase__ , UpperCAmelCase__ )
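# Dispatch examples (sketch): an all-caps name renders DUMMY_CONSTANT, an
# all-lowercase name renders DUMMY_FUNCTION, and class-style names such as
# "UNet2DModel" fall through to DUMMY_CLASS with the backend list.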
def _lowerCAmelCase ( _a : Tuple=None ) -> Any:
if backend_specific_objects is None:
lowerCAmelCase_ : Optional[int] = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowerCAmelCase_ : Dict = {}
for backend, objects in backend_specific_objects.items():
lowerCAmelCase_ : int = """[""" + """, """.join(F'"{b}"' for b in backend.split("""_and_""" ) ) + """]"""
lowerCAmelCase_ : Dict = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(UpperCAmelCase__ , UpperCAmelCase__ ) for o in objects] )
lowerCAmelCase_ : int = dummy_file
return dummy_files
def _lowerCAmelCase ( _a : Union[str, Any]=False ) -> Optional[Any]:
lowerCAmelCase_ : Union[str, Any] = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowerCAmelCase_ : str = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
lowerCAmelCase_ : Union[str, Any] = os.path.join(UpperCAmelCase__ , """utils""" )
lowerCAmelCase_ : Tuple = {
backend: os.path.join(UpperCAmelCase__ , F'dummy_{short_names.get(UpperCAmelCase__ , UpperCAmelCase__ )}_objects.py' )
for backend in dummy_files.keys()
}
lowerCAmelCase_ : Optional[Any] = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(UpperCAmelCase__ ):
with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCAmelCase_ : Dict = f.read()
else:
lowerCAmelCase_ : List[Any] = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(UpperCAmelCase__ , UpperCAmelCase__ )}_objects.py as the main '
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F'diffusers.utils.dummy_{short_names.get(UpperCAmelCase__ , UpperCAmelCase__ )}_objects.py. Run `make fix-copies` '
"""to fix this.""" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase_ : Optional[int] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
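# Typical usage from the repo root (mirrors the argparse flag above):
#   python utils/check_dummies.py                      # report inconsistencies
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate dummy files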
| 715 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase_ : Tuple = """src/diffusers"""
UpperCAmelCase_ : str = """."""
# This is to make sure the diffusers module imported is the one in the repo.
UpperCAmelCase_ : Any = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCAmelCase_ : Union[str, Any] = spec.loader.load_module()
def _should_continue(line: str, indent: str) -> bool:
    # A block continues while lines stay indented, are empty, or only close a signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name: str) -> str:
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code: str) -> str:
    """Return the indentation of the first non-empty line in `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    """Apply black and `style_docstrings_in_code` to `code`, preserving any leading indentation."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
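
# Illustration (hypothetical input): an indented fragment such as "    x = 1\n" is wrapped as
# "class Bla:\n    x = 1\n" so black will accept and format it, then the fake header is stripped again.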
def is_copy_consistent(filename: str, overwrite: bool = False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the file content if `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase_ : List[str] = parser.parse_args()
check_copies(args.fix_and_overwrite)
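
# Illustrative marker this script consumes (it must match `_re_copy_warning` above); the
# `Foo->Bar` part is a hypothetical rename applied to the original code before diffing:
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock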
| 440 | 0 |
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b
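
# Example: gcd(26, 12) == 2, while gcd(7, 26) == 1, so 7 has an inverse modulo 26 (computed below: 15).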
def mod_inverse(a: int, m: int) -> int:
    """Modular multiplicative inverse of `a` modulo `m`, via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 553 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class __UpperCamelCase :
def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any] ) -> List[str]:
lowerCAmelCase :Dict = str(id_ )
lowerCAmelCase :str = None
lowerCAmelCase :Any = None
lowerCAmelCase :List[str] = []
lowerCAmelCase :List[Any] = {} # {vertex:distance}
def __lt__( self : Union[str, Any] , UpperCAmelCase : Any ) -> str:
return self.key < other.key
def __repr__( self : Union[str, Any] ) -> Dict:
return self.id
def UpperCAmelCase__ ( self : Any , UpperCAmelCase : Tuple ) -> List[str]:
self.neighbors.append(UpperCAmelCase )
def UpperCAmelCase__ ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] ) -> Optional[int]:
lowerCAmelCase :str = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
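
# Example (small triangle graph; vertex ids are 0-based while `connect` arguments are 1-based):
# graph = [Vertex(i) for i in range(3)]
# connect(graph, 1, 2, 15)
# connect(graph, 1, 3, 10)
# connect(graph, 2, 3, 5)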
def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm, list-based O(V^2) version; returns MST edges as (child_id, parent_id) pairs."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
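
# For the triangle example above, prim(graph, graph[0]) returns [(2, 3), (3, 1)]:
# vertex 2 hangs off vertex 3 (weight 5) and vertex 3 off vertex 1 (weight 10).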
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm, binary-heap O(E log V) version; yields MST edges as (child_id, parent_id) pairs."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 553 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 512 |
'''simple docstring'''
import math
def decimal_to_octal(num: int) -> str:
    """Convert a positive decimal integer to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
    """Print octal equivalents of a few decimal numbers."""
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(2_16 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(5_12 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
    main()
| 512 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "focalnet"

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
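
# Usage sketch (values mirror the defaults above):
# config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2])
# assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]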
| 59 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None) -> None:
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map label strings from ImageNet to their corresponding class ids."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1_000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
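
# Usage sketch (checkpoint name is an assumption, not taken from this file):
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
# class_ids = pipe.get_label_ids(["white shark"])
# image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]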
| 95 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 52_42_88,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
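
# Usage sketch (checkpoint name taken from PRETRAINED_VOCAB_FILES_MAP above):
# tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# ids = tokenizer("Crime and Punishment")["input_ids"]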
| 348 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Update the version of one file, using a pattern from REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
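
# Illustration (hypothetical file content): with pattern="init" and version "4.28.0", the line
#   __version__ = "4.27.0.dev0"
# is rewritten to
#   __version__ = "4.28.0"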
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links pointing at the main docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version in the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_lowerCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
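
# Typical invocations (assuming the script lives at utils/release.py, as in transformers):
#   python utils/release.py                  # prepare a release (add --patch for a patch release)
#   python utils/release.py --post_release   # move the repo back to a .dev0 version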
| 348 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 241 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and tokenizer into a single processor."""

    feature_extractor_class = 'Speech2TextFeatureExtractor'
    tokenizer_class = 'Speech2TextTokenizer'

    def __init__(self, feature_extractor, tokenizer) -> None:
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 241 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
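
# Usage sketch (the model id and `image` variable are assumptions, not taken from this file):
# from transformers import ChineseCLIPProcessor
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")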
| 166 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi-level feedback queue: round-robin queues with different time slices, then FCFS."""

    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        # total number of queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one run the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
snake_case_ : Tuple = Process('P1', 0, 5_3)
snake_case_ : List[str] = Process('P2', 0, 1_7)
snake_case_ : Optional[Any] = Process('P3', 0, 6_8)
snake_case_ : int = Process('P4', 0, 2_4)
snake_case_ : Optional[Any] = 3
snake_case_ : Union[str, Any] = [1_7, 2_5]
snake_case_ : Any = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
snake_case_ : List[Any] = Process('P1', 0, 5_3)
snake_case_ : Optional[Any] = Process('P2', 0, 1_7)
snake_case_ : Optional[int] = Process('P3', 0, 6_8)
snake_case_ : List[Any] = Process('P4', 0, 2_4)
snake_case_ : Tuple = 3
snake_case_ : Union[str, Any] = [1_7, 2_5]
snake_case_ : Optional[int] = deque([Pa, Pa, Pa, Pa])
snake_case_ : Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case_ : int = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
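
    # Expected output for the configuration above (worked by hand):
    #   waiting time:                   [83, 17, 94, 101]
    #   completion time:                [136, 34, 162, 125]
    #   turnaround time:                [136, 34, 162, 125]
    #   sequence of finished processes: ['P2', 'P4', 'P1', 'P3']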
| 166 | 1 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the roberta-prelayernorm weights into the transformers structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM']
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.') :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_a : List[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
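
    # Example invocation (repo id from the help text above; script filename and output path are assumptions):
    #   python convert_roberta_prelayernorm_checkpoint.py \
    #       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
    #       --pytorch_dump_folder_path ./roberta-prelayernorm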
| 56 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 321 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    '''simple docstring'''

    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class lowercase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowerCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase__ = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_case_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
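# Minimal standalone usage sketch (assumes network access to the Hugging Face
# Hub; it mirrors what the integration test above exercises):
#
#     from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
#     model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
#     batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
#     ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#     print(tokenizer.batch_decode(ids, skip_special_tokens=True))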
| 187 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b
    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
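# Illustrative usage sketch (hypothetical sizes; cutoffs split the vocabulary
# into a frequent "head" and one tail cluster):
#
#     import tensorflow as tf
#     softmax = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[500])
#     hidden = tf.random.normal((8, 4, 32))  # (seq_len, batch, d_proj), matching the "ibd" einsum
#     target = tf.random.uniform((8, 4), maxval=1000, dtype=tf.int32)
#     logprob = softmax(hidden, target)      # log-probabilities; the NLL is registered via add_loss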
| 187 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", sp_model_kwargs=None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text):
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
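# Quick usage sketch (assumes the checkpoint above is reachable on the Hub):
#
#     tok = BertGenerationTokenizer.from_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder"
#     )
#     ids = tok("Hello world").input_ids
#     print(tok.decode(ids))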
| 572 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
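# Condensed end-to-end sketch of the same prior/decoder pair (hypothetical
# prompt and input image; requires a CUDA GPU and Hub access):
#
#     prior = KandinskyV22PriorPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#     ).to("cuda")
#     decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
#     ).to("cuda")
#     emb, neg_emb = prior("a watercolor cat").to_tuple()
#     out = decoder(image=init_image, image_embeds=emb, negative_image_embeds=neg_emb,
#                   strength=0.3, output_type="np")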
| 590 | 0 |
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Compute the numeric solution of the ODE y' = f(x, y) with the classic
    fourth-order Runge-Kutta method, starting from y(x0) = y0 with step h."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
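# Worked example: integrate dy/dx = y from x = 0 to 1 with y(0) = 1.
# The exact solution is e^x, so the final value should approach e ≈ 2.71828:
#
#     y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     print(y[-1])  # ≈ 2.7182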
| 313 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
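# Quick sketch: instantiating the config and a randomly initialized model from
# it (model class assumed available from transformers; no download needed):
#
#     from transformers import ViTModel
#
#     config = ViTConfig(image_size=224, patch_size=16, hidden_size=768)
#     model = ViTModel(config)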
| 313 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass
    @unittest.skip
    def test_correct_missing_keys(self):
        pass
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass
    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass
    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass
    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass
    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass
    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass
    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass
    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass
    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass
    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass
    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass
    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass
    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass
    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass
    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass
    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
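# End-to-end folding sketch (assumes Hub access; the tokenizer pairing shown
# here is the standard one for ESMFold checkpoints):
#
#     from transformers import AutoTokenizer, EsmForProteinFolding
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
#     model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
#     inputs = tokenizer(["MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"], return_tensors="pt",
#                        add_special_tokens=False)
#     positions = model(**inputs).positions  # predicted atom coordinates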
| 239 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"
    def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
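# Usage sketch: build a small random-weight FNet from this config (model class
# assumed available from transformers):
#
#     from transformers import FNetModel
#
#     config = FNetConfig(num_hidden_layers=2, hidden_size=128)
#     model = FNetModel(config)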
| 239 | 1 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id
    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens
    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
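# Since fire exposes the function as a CLI, a typical invocation looks like
# the following (the checkpoint and data path are hypothetical):
#
#     python save_len_file.py facebook/bart-large-cnn /path/to/cnn_dm_data
#
# which writes the per-split length files later used for dynamic batching.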
| 703 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
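# Note: one bucket is created per integer in [min, max], each bucket is sorted
# with Python's built-in sort, and the buckets are concatenated. Floats work
# too, since values are binned by their integer offset from the minimum:
#
#     bucket_sort([0.4, 1.2, 0.1])  # -> [0.1, 0.4, 1.2]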
| 14 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))
    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))
    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
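# Direct tokenizer usage sketch (same checkpoint as used throughout the tests
# above; requires Hub access):
#
#     from transformers import MobileBertTokenizer
#
#     tok = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
#     print(tok.tokenize("UNwanted, running"))  # lower-cased wordpiece pieces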
| 304 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length, vocab_size, d_model, dropout_rate, num_layers, num_heads, d_kv, d_ff, feed_forward_proj, is_decoder=False):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask into the additive format the T5 blocks expect
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 458 | 0 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_37_81_37
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the surface distance in metres between two lat/long points
    on the WGS84 ellipsoid using Lambert's formula for long lines."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
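# Example (illustrative; requires the sibling haversine_distance module):
# San Francisco (37.774856, -122.424227) to Yosemite (37.864742, -119.537521)
#
#     lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)
#     # -> roughly 254 km, expressed in metres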
| 269 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(limit: int = 10_000) -> int:
    """Project Euler 21: sum all amicable numbers below the limit."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
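# Worked check: 220 and 284 form the smallest amicable pair
# (sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220),
# and solution(10_000) returns 31626, the known Project Euler 21 answer.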
| 269 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")
    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits
    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 564 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"
    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , A_ , )
__snake_case = num_channels
__snake_case = num_encoder_blocks
__snake_case = depths
__snake_case = sr_ratios
__snake_case = hidden_sizes
__snake_case = patch_sizes
__snake_case = strides
__snake_case = mlp_ratios
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = drop_path_rate
__snake_case = layer_norm_eps
__snake_case = decoder_hidden_size
__snake_case = kwargs.get('''reshape_last_stage''' , A_ )
__snake_case = semantic_loss_ignore_index
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = version.parse('''1.11''' )
@property
def lowercase ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase ( self : List[Any] ) -> float:
return 1E-4
@property
def lowercase ( self : Any ) -> int:
return 12 | 564 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 |
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : str , snake_case_ : str ):
__magic_name__ = from_type.lower().strip('''s''' )
__magic_name__ = to_type.lower().strip('''s''' )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
if from_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'from_type\' value: {from_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
if to_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'to_type\' value: {to_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
__magic_name__ = METRIC_CONVERSION[from_sanitized]
__magic_name__ = METRIC_CONVERSION[to_sanitized]
__magic_name__ = 1
if from_exponent > to_exponent:
__magic_name__ = from_exponent - to_exponent
else:
__magic_name__ = -(to_exponent - from_exponent)
return value * pow(10 , snake_case_ )
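

# Hedged usage sketch (the expected values below are mine, derived from the
# exponent table above, not taken from the original file):
#
#     >>> length_conversion(4, "meter", "kilometer")
#     0.004
#     >>> length_conversion(1, "megametre", "kilometer")
#     1000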
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 74 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 81 | 0 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """
    Validate a Spanish national ID (DNI): 8 digits followed by a control letter.
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
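

# Hedged example (the sample IDs are illustrative: 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so only the first one validates):
#
#     >>> is_spain_national_id("12345678Z")
#     True
#     >>> is_spain_national_id("12345678T")
#     False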
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 |
def solution(limit: int = 1_000_000) -> int:
    """
    Count the reduced proper fractions with denominator d <= limit, i.e. the
    sum of Euler's totient phi(d) for 2 <= d <= limit.
    """
    # Totient sieve: start from phi(i) = i - 1 (exact for primes); whenever
    # phi(i) is still i - 1 at step i, i is prime, so correct its multiples.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
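

# Hedged sanity check (not from the original file): for limit = 8 there are 21
# reduced proper fractions, which is small enough to verify by hand.
#
#     >>> solution(8)
#     21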
if __name__ == "__main__":
print(solution())
| 315 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                'input_ids': [
                    [1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
                ],
                'token_type_ids': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                'attention_mask': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 592 |
from __future__ import annotations

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # A node already settled by the opposite search closes a candidate path.
        if nxt in visited_backward and cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
            shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Once the two frontiers together cost at least the best path seen,
        # no shorter path can remain undiscovered.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
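
# Hedged usage sketch for the demo graphs above: the cheapest E -> F route is
# E -> G -> F with cost 2 + 1 = 3.
#
#     >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
#     3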
if __name__ == "__main__":
import doctest
doctest.testmod()
| 514 | 0 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """
    Extract patches from a (channels, height, width) tensor, returning a tensor
    of shape (1, rows, columns, channels * patch_height * patch_width).
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes=None,
    font_path=None,
):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the scaled image fits within max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """
        Per-image standardization: zero mean and unit variance over the whole
        image, with the stddev clamped away from zero.
        """
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
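

# Hedged usage sketch (the variable names and shapes below are assumptions,
# not from this file): with the default 16x16 patches and max_patches=2048, an
# RGB image is flattened to (2048, 2 + 16 * 16 * 3), where the first two
# features are the 1-based row/column ids of each patch.
#
#     processor = Pix2StructImageProcessor()
#     batch = processor(images=[pil_image], return_tensors="np")
#     assert batch["flattened_patches"].shape == (1, 2048, 770)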
| 720 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
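
    # Hedged CLI sketch (the script filename is an assumption, not from this file):
    #   python convert_suno_to_hf.py text ./bark-text-hf --is_small
    # converts the small text (semantic) checkpoint and writes the HF model to
    # ./bark-text-hf after checking its outputs against the original model.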
| 41 | 0 |
def mf_knapsack(i, wt, val, j):
    """
    Memory-function (memoized) knapsack: fills the global dp table `f` on demand.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 226 | """simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 528 | 0 |
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBird QA module with an extra CLS head on top for predicting the answer
    category, kept weight-compatible with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        # one-hot encode the labels and compute the negative log-likelihood
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        # apply weight decay to everything except biases and LayerNorm scales
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
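
# Hedged usage sketch (the step counts are illustrative; lr/warmup values
# mirror the defaults in Args above):
#
#     tx, lr = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=20_000,
#                       num_train_steps=100_000, weight_decay=0.0095)
#     # `tx` plugs into TrainState.create(...); `lr` is the schedule used for logging.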
| 712 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Gradient of the loss w.r.t. each weight matrix, computed via the
        # chain rule from the output layer back to the input layer.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # `value` is already a sigmoid activation s, so the derivative is s * (1 - s).
    return (value) * (1 - (value))
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 471 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_flip_channel_order: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
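

# A minimal usage sketch (added for illustration, not part of the original
# module). It assumes the default settings above: resize to a shortest edge of
# 224, then center crop (with padding) to 256x256, channels first.
def _example_preprocess() -> None:
    processor = MobileViTImageProcessor()
    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    batch = processor.preprocess(image, return_tensors=None)
    assert batch["pixel_values"][0].shape == (3, 256, 256)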
| 339 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    """Run the same operations against HashMap and dict; both must agree."""
    my = HashMap(initial_block_size=4)
    py = {}
    for fun, *args in operations:
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    """HashMap must not expose public names that dict does not have."""

    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
| 60 | 0 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity of two strings, a value in [0, 1]."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
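

# Quick sanity checks (added for illustration; the expected values are the
# classic Jaro-Winkler reference results, rounded to 4 decimal places).
def _check_jaro_winkler() -> None:
    assert abs(jaro_winkler("martha", "marhta") - 0.9611) < 1e-3
    assert jaro_winkler("hello", "hello") == 1.0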
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
| 389 |
'''simple docstring'''
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths in O(v^3) time and O(v^2) space."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
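

# A minimal non-interactive sketch (added for illustration, not in the original
# script): the same 3-vertex example graph as above, built directly in code.
def _demo_floyd_warshall() -> None:
    inf = float("inf")
    g = [[0.0, inf, inf], [inf, 0.0, 2.0], [inf, 1.0, 0.0]]
    dist, _ = floyd_warshall(g, 3)
    assert dist[1][2] == 2.0 and dist[2][1] == 1.0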
| 389 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    """Projects a timestep embedding through two dense layers with a SiLU in between."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps get_sinusoidal_embeddings as a parameter-free Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
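

# A minimal usage sketch (added for illustration, not part of the original
# module): embed four scalar timesteps into 32-dimensional features by calling
# the free function directly (the FlaxTimesteps wrapper above does the same).
def _example_embeddings() -> None:
    timesteps = jnp.array([0.0, 1.0, 2.0, 3.0])
    emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
    assert emb.shape == (4, 32)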
| 47 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Convert an integer to its minimal Roman numeral representation."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: total characters saved by rewriting each numeral minimally."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
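

# Round-trip sanity check (added for illustration, not part of the original
# Project Euler solution).
def _check_roman_numerals() -> None:
    assert parse_roman_numerals("XIX") == 19
    assert generate_roman_numerals(19) == "XIX"
    # a valid but non-minimal numeral parses to the same value
    assert parse_roman_numerals("XVIIII") == 19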
return savings
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 322 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return 32
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return 100
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __lowercase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_a : List[str] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModelWithProjection(_a )
@property
def __lowercase ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_a : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,num_hidden_layers=5 ,num_attention_heads=4 ,image_size=32 ,intermediate_size=37 ,patch_size=1 ,)
return CLIPVisionModelWithProjection(_a )
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : List[str] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
_a : int = UnCLIPTextProjModel(**_a )
return model
@property
def __lowercase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Any = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
_a : Optional[int] = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Optional[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(1 )
_a : List[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Optional[Any] = self.dummy_decoder
_a : Dict = self.dummy_text_proj
_a : str = self.dummy_text_encoder
_a : Any = self.dummy_tokenizer
_a : List[Any] = self.dummy_super_res_first
_a : Any = self.dummy_super_res_last
_a : Optional[Any] = UnCLIPScheduler(
variance_type='learned_range' ,prediction_type='epsilon' ,num_train_timesteps=1000 ,)
_a : Union[str, Any] = UnCLIPScheduler(
variance_type='fixed_small_log' ,prediction_type='epsilon' ,num_train_timesteps=1000 ,)
_a : Tuple = CLIPImageProcessor(crop_size=32 ,size=32 )
_a : List[Any] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __lowercase ( self : int ,_a : str ,_a : Any=0 ,_a : str=True ):
'''simple docstring'''
_a : Union[str, Any] = floats_tensor((1, 3, 32, 32) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith('mps' ):
_a : int = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
if pil_image:
_a : Tuple = input_image * 0.5 + 0.5
_a : Optional[Any] = input_image.clamp(0 ,1 )
_a : int = input_image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
_a : List[Any] = DiffusionPipeline.numpy_to_pil(_a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __lowercase ( self : int ):
'''simple docstring'''
_a : Dict = 'cpu'
_a : str = self.get_dummy_components()
_a : Optional[int] = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : str = self.get_dummy_inputs(_a ,pil_image=_a )
_a : int = pipe(**_a )
_a : Any = output.images
_a : Optional[Any] = self.get_dummy_inputs(_a ,pil_image=_a )
_a : Tuple = pipe(
**_a ,return_dict=_a ,)[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : Dict = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[Any] = 'cpu'
_a : Optional[Any] = self.get_dummy_components()
_a : Optional[int] = self.pipeline_class(**_a )
_a : Optional[int] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Optional[int] = self.get_dummy_inputs(_a ,pil_image=_a )
_a : Any = pipe(**_a )
_a : Dict = output.images
_a : Optional[Any] = self.get_dummy_inputs(_a ,pil_image=_a )
_a : Union[str, Any] = pipe(
**_a ,return_dict=_a ,)[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : Dict = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[str] = 'cpu'
_a : List[Any] = self.get_dummy_components()
_a : int = self.pipeline_class(**_a )
_a : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Tuple = self.get_dummy_inputs(_a ,pil_image=_a )
_a : str = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
_a : List[Any] = pipe(**_a )
_a : List[Any] = output.images
_a : Dict = self.get_dummy_inputs(_a ,pil_image=_a )
_a : Optional[int] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
_a : List[str] = pipe(
**_a ,return_dict=_a ,)[0]
_a : List[str] = image[0, -3:, -3:, -1]
_a : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_a : str = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : str = torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
_a : Any = self.get_dummy_components()
_a : int = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Optional[Any] = torch.Generator(device=_a ).manual_seed(0 )
_a : Optional[Any] = pipe.decoder.dtype
_a : Tuple = 1
_a : Dict = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
_a : Any = pipe.prepare_latents(
_a ,dtype=_a ,device=_a ,generator=_a ,latents=_a ,scheduler=DummyScheduler() )
_a : str = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
_a : int = pipe.prepare_latents(
_a ,dtype=_a ,device=_a ,generator=_a ,latents=_a ,scheduler=DummyScheduler() )
_a : Union[str, Any] = self.get_dummy_inputs(_a ,pil_image=_a )
_a : Any = pipe(
**_a ,decoder_latents=_a ,super_res_latents=_a ).images
_a : List[str] = self.get_dummy_inputs(_a ,pil_image=_a )
# Don't pass image, instead pass embedding
_a : Optional[int] = pipeline_inputs.pop('image' )
_a : str = pipe.image_encoder(_a ).image_embeds
_a : Union[str, Any] = pipe(
**_a ,decoder_latents=_a ,super_res_latents=_a ,image_embeddings=_a ,).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
_a : Tuple = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=_a ,expected_max_diff=_a )
@skip_mps
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : str = torch_device == 'cpu'
_a : Tuple = True
_a : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=_a ,relax_max_difference=_a ,additional_params_copy_to_batched_inputs=_a ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Union[str, Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
_a : Dict = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=_a ,additional_params_copy_to_batched_inputs=_a ,)
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=_a )
@skip_mps
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __lowercase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowercase ( self : Tuple ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
| 319 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 319 | 1 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
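

# Worked check (added for illustration): for n = 10 the square of the sum is
# 55**2 = 3025, the sum of the squares is 385, and the difference is 2640.
def _check_solution() -> None:
    assert solution(10) == 2640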
if __name__ == "__main__":
print(f'''{solution() = }''')
| 451 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__snake_case = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 451 | 1 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 719 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Disable gradient updates for every parameter of the module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about flaky MPS support."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display an image with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Current wall-clock time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
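

# A minimal usage sketch (added for illustration; the helper names follow the
# renamed functions above, not an external API).
def _example_usage() -> None:
    device = get_device()
    model = torch.nn.Linear(4, 2).to(device)
    freeze_module(model)  # no parameter will receive gradients
    assert all(not p.requires_grad for p in model.parameters())
    print(f"[{get_timestamp()}] model frozen on {device}")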
| 43 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
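

# A minimal usage sketch (added for illustration): build a default config and
# inspect the ONNX input spec declared above.
def _example_config() -> None:
    config = TableTransformerConfig()
    assert config.hidden_size == config.d_model == 256  # via the hidden_size property
    onnx_config = TableTransformerOnnxConfig(config)
    assert list(onnx_config.inputs.keys()) == ["pixel_values", "pixel_mask"]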
| 519 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
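

# A hedged usage sketch (added for illustration): in practice these criteria
# are passed to `generate`. The model/tokenizer names below are placeholders,
# not part of the original test file, and the snippet downloads weights, so it
# is left as a comment.
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   out = model.generate(**tok("Hello", return_tensors="pt"), stopping_criteria=criteria)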
| 537 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def UpperCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
a__ : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
a__ : int = self.model_class(**a_ )
model.to(a_ )
assert not model.is_gradient_checkpointing and model.training
a__ : int = model(**a_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
a__ : Optional[int] = torch.randn_like(a_ )
a__ : List[Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
a__ : Union[str, Any] = self.model_class(**a_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(a_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
a__ : Tuple = model_a(**a_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
a__ : str = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
a__ : Any = dict(model.named_parameters() )
a__ : Any = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(a_ )
a__ : str = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
a__ : Dict = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
a__ : List[str] = model.to(a_ )
model.eval()
if torch_device == "mps":
a__ : Any = torch.manual_seed(0 )
else:
a__ : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
a__ : Optional[Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
a__ : Any = image.to(a_ )
with torch.no_grad():
a__ : str = model(a_ , sample_posterior=a_ , generator=a_ ).sample
a__ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
a__ : Dict = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
a__ : Dict = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
a__ : List[str] = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(a_ , a_ , rtol=1E-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCAmelCase ( self : Tuple , a_ : Tuple , a_ : str , a_ : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = self.get_sd_vae_model()
a__ : int = self.get_sd_image(a_ )
a__ : Tuple = self.get_generator(a_ )
with torch.no_grad():
a__ : int = model(a_ , generator=a_ , sample_posterior=a_ ).sample
assert sample.shape == image.shape
a__ : int = sample[-1, -2:, -2:, :2].flatten().float().cpu()
a__ : str = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(a_ , a_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase ( self : int , a_ : Optional[int] , a_ : List[Any] ) -> Any:
'''simple docstring'''
a__ : int = self.get_sd_vae_model(fpaa=a_ )
a__ : List[Any] = self.get_sd_image(a_ , fpaa=a_ )
a__ : Tuple = self.get_generator(a_ )
with torch.no_grad():
a__ : str = model(a_ , generator=a_ , sample_posterior=a_ ).sample
assert sample.shape == image.shape
a__ : int = sample[-1, -2:, :2, -2:].flatten().float().cpu()
a__ : List[str] = torch.tensor(a_ )
assert torch_all_close(a_ , a_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCAmelCase ( self : Dict , a_ : Optional[int] , a_ : List[str] , a_ : Dict ) -> List[str]:
'''simple docstring'''
a__ : Any = self.get_sd_vae_model()
a__ : Union[str, Any] = self.get_sd_image(a_ )
with torch.no_grad():
a__ : Optional[Any] = model(a_ ).sample
assert sample.shape == image.shape
a__ : str = sample[-1, -2:, -2:, :2].flatten().float().cpu()
a__ : List[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(a_ , a_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase ( self : int , a_ : int , a_ : Union[str, Any] ) -> str:
'''simple docstring'''
a__ : Any = self.get_sd_vae_model()
a__ : List[str] = self.get_sd_image(a_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
a__ : Union[str, Any] = model.decode(a_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
a__ : List[str] = sample[-1, -2:, :2, -2:].flatten().cpu()
a__ : Optional[Any] = torch.tensor(a_ )
assert torch_all_close(a_ , a_ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase ( self : int , a_ : Dict , a_ : Any ) -> str:
'''simple docstring'''
a__ : int = self.get_sd_vae_model(fpaa=a_ )
a__ : Optional[Any] = self.get_sd_image(a_ , shape=(3, 4, 64, 64) , fpaa=a_ )
with torch.no_grad():
a__ : Dict = model.decode(a_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
a__ : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
a__ : str = torch.tensor(a_ )
assert torch_all_close(a_ , a_ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def UpperCAmelCase ( self : Optional[int] , a_ : Tuple ) -> str:
'''simple docstring'''
a__ : List[str] = self.get_sd_vae_model(fpaa=a_ )
a__ : List[Any] = self.get_sd_image(a_ , shape=(3, 4, 64, 64) , fpaa=a_ )
with torch.no_grad():
a__ : Dict = model.decode(a_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
a__ : Any = model.decode(a_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(a_ , a_ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def UpperCAmelCase ( self : Optional[int] , a_ : List[Any] ) -> Any:
'''simple docstring'''
a__ : str = self.get_sd_vae_model()
a__ : Optional[int] = self.get_sd_image(a_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
a__ : Dict = model.decode(a_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
a__ : Any = model.decode(a_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(a_ , a_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 721 |
"""simple docstring"""
def join(separator: str, separated: list[str]) -> str:
    """Concatenate the given strings with `separator`, mirroring str.join."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
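

# Examples (added for illustration):
#   join("", ["a", "b", "c", "d"])        -> "abcd"
#   join("#", ["a", "b", "c", "d"])       -> "a#b#c#d"
#   join(" ", ["You", "are", "amazing!"]) -> "You are amazing!"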
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 251 | 0 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place queens row by row, pruning on column and diagonal collisions."""
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
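

# Quick sanity check (added for illustration): the 4-queens puzzle has exactly
# two solutions.
def _check_four_queens() -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, 4)
    assert len(boards) == 2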
if __name__ == "__main__":
import doctest
doctest.testmod()
    n_queens_solution(4)
| 91 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="""audio""", label_column="""label""")
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 164 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )
class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node to the end of the list (before rear)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node from the list; None if it is detached."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    # class variable mapping decorated functions to their LRUCache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Returns the value for the input key and updates the Double Linked List.
        # Returns None if key is not present in cache.
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        # Sets the value for the input key and updates the Double Linked List.
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        # Decorator version of the LRU Cache

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
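# Usage sketch (added illustration): memoising a unary function with the decorator
# defined above.
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#   fib(30)
#   print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)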
if __name__ == "__main__":
import doctest
doctest.testmod() | 69 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting the
        # input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Fine-tunes the weights of the network based on the error obtained in
        # the previous epoch (i.e. backpropagation).
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
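# Note (added): sigmoid_derivative expects the already-activated value a = sigmoid(x),
# since d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)) = a * (1 - a), which is why
# back_propagation feeds it layer outputs rather than raw pre-activations.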
def example() -> int:
    # Example of how to use the neural network class and its methods.
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example() | 69 | 1 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
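# Usage note (added): a pipeline drives the blend by setting the attributes above
# before calling forward, e.g.
#
#   model.mix_ratio = 0.7               # weight of the first encoded branch
#   model.condition_lengths = [77, 257]
#   model.transformer_index_for_condition = [1, 0]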
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 | 1 |
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
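# Usage note (added): callers typically compile once and cache the returned module, e.g.
#
#   MultiScaleDeformableAttention = load_cuda_kernels()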
| 703 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=threshold , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=top_k , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
| 542 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
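# Worked example (added): for array = [1, 4, 2, 10, 2, 3, 1, 0, 20] and k = 4 the
# sliding-window sums are 17, 18, 17, 16, 6, 24, so max_sum_in_array returns 24.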
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array =[randint(-10_00, 10_00) for i in range(1_00)]
    k =randint(0, 1_10)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 664 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, max_patches=2048, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
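# Usage sketch (added illustration, checkpoint id assumed):
#
#   from transformers import Pix2StructProcessor
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="A picture of", return_tensors="pt")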
| 710 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 205 | 0 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)
    def test_token2json(self):
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }

        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )

        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 94 |
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module):
    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
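# Usage sketch (added illustration): mapping a 768-dim hidden state to 5 class logits.
#
#   import torch
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(1, 768))  # shape (1, 5)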
| 131 | 0 |
'''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
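# Worked examples (added): is_isogram("Uncopyrightable") is True, while
# is_isogram("allowance") is False because "a" and "l" repeat.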
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 718 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding

        elif text is not None:
            return encoding

        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
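# Usage sketch (added illustration, checkpoint id assumed):
#
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=audio_array,
#                      sampling_rate=48000, return_tensors="pt")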
| 92 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums=None) -> int:
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty')

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
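# Worked example (added): for nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4] the running
# maximum reaches 6, the sum of the contiguous run [4, -1, 2, 1].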
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 422 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
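    # Note (added): the custom Jieba pre-tokenizer cannot be pickled, so
    # __getstate__ (and save_pretrained below) swap in a plain BertPreTokenizer,
    # and __setstate__ rebuilds the Jieba pre-tokenizer from the restored vocab.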
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 422 | 1 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass | 396 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 396 | 1 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta
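# Note (added): the loop above is plain batch gradient descent on the log-loss,
# whose gradient is x.T @ (h - y) / m with h = sigmoid_function(x @ theta).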
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print('theta: ', theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show() | 322 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs):
"""simple docstring"""
_lowercase : Union[str, Any] = self.unet.config.sample_size
_lowercase : Dict = (batch_size, 3, img_size, img_size)
_lowercase : int = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_lowercase : Any = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_lowercase : Any = self.scheduler.schedule[t]
_lowercase : Tuple = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_lowercase , _lowercase : Any = self.scheduler.add_noise_to_input(UpperCamelCase , UpperCamelCase , generator=UpperCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowercase : Optional[int] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_lowercase : Dict = self.scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowercase : Optional[Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_lowercase : Any = self.scheduler.step_correct(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , step_output.prev_sample , step_output['''derivative'''] , )
_lowercase : Dict = step_output.prev_sample
_lowercase : List[Any] = (sample / 2 + 0.5).clamp(0 , 1 )
_lowercase : List[Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowercase : List[Any] = self.numpy_to_pil(UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase ) | 322 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ):
        super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
A__ = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , UpperCAmelCase__ , standard_warn=UpperCAmelCase__ )
A__ = dict(scheduler.config )
A__ = 1
A__ = FrozenDict(UpperCAmelCase__ )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
A__ = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , UpperCAmelCase__ , standard_warn=UpperCAmelCase__ )
A__ = dict(scheduler.config )
A__ = True
A__ = FrozenDict(UpperCAmelCase__ )
        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )

        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )
def __A ( self , UpperCAmelCase__ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def __A ( self ):
self.enable_attention_slicing(UpperCAmelCase__ )
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(self, prompt, image, text, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        # We use the input text to generate the segmentation mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
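# Usage sketch (added illustration, component names assumed): the class is meant to
# be wired up like any diffusers pipeline, e.g.
#
#   pipe = TextInpainting(segmentation_model=seg_model, segmentation_processor=seg_processor,
#                         vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
#                         scheduler=scheduler, safety_checker=safety_checker,
#                         feature_extractor=feature_extractor)
#   result = pipe(prompt="a cup of coffee", image=init_image, text="a glass").images[0]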
| 232 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"), )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
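
# A minimal sketch, not part of the original script, of the gradient-accumulation
# arithmetic training_function relies on: shrinking the per-step batch while
# accumulating keeps the effective batch size constant. MAX_GPU_BATCH_SIZE = 16 is an
# assumption here; the real constant is defined above this excerpt.
MAX_GPU_BATCH_SIZE = 16
batch_size, gradient_accumulation_steps = 64, 1
if batch_size > MAX_GPU_BATCH_SIZE:
    gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
    batch_size = MAX_GPU_BATCH_SIZE
assert batch_size * gradient_accumulation_steps == 64  # effective batch size preserved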
| 232 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"facebook/nllb-large-en-ro": 10_24,
"facebook/nllb-200-distilled-600M": 10_24,
}
# fmt: off
UpperCamelCase_ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = ['input_ids', 'attention_mask']
lowerCamelCase_ = NllbTokenizer
lowerCamelCase_ = []
lowerCamelCase_ = []
def __init__( self : int , snake_case_ : int=None , snake_case_ : List[str]=None , snake_case_ : Any="<s>" , snake_case_ : List[str]="</s>" , snake_case_ : str="</s>" , snake_case_ : Optional[int]="<s>" , snake_case_ : Any="<unk>" , snake_case_ : Any="<pad>" , snake_case_ : Any="<mask>" , snake_case_ : Tuple=None , snake_case_ : Optional[int]=None , snake_case_ : Dict=None , snake_case_ : Union[str, Any]=False , **snake_case_ : List[Any] , ):
"""simple docstring"""
A : Tuple = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
A : Any = legacy_behaviour
super().__init__(
vocab_file=snake_case_ , tokenizer_file=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , additional_special_tokens=snake_case_ , legacy_behaviour=snake_case_ , **snake_case_ , )
A : List[str] = vocab_file
A : Union[str, Any] = False if not self.vocab_file else True
A : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
A : List[Any] = {
lang_code: self.convert_tokens_to_ids(snake_case_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A : Optional[int] = src_lang if src_lang is not None else '''eng_Latn'''
A : List[Any] = self.convert_tokens_to_ids(self._src_lang )
A : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self : Any , snake_case_ : str ):
"""simple docstring"""
A : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCAmelCase ( self : Optional[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self : Tuple , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
"""simple docstring"""
A : Optional[int] = [self.sep_token_id]
A : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self : Dict , snake_case_ : Any , snake_case_ : str , snake_case_ : Optional[str] , snake_case_ : Optional[str] , **snake_case_ : Optional[Any] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
A : List[Any] = src_lang
A : int = self(snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , **snake_case_ )
A : List[Any] = self.convert_tokens_to_ids(snake_case_ )
A : List[str] = tgt_lang_id
return inputs
def _UpperCAmelCase ( self : Union[str, Any] , snake_case_ : List[str] , snake_case_ : str = "eng_Latn" , snake_case_ : Optional[List[str]] = None , snake_case_ : str = "fra_Latn" , **snake_case_ : Any , ):
"""simple docstring"""
A : Tuple = src_lang
A : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCAmelCase ( self : Dict , snake_case_ : Any ):
"""simple docstring"""
A : Optional[Any] = self.convert_tokens_to_ids(snake_case_ )
if self.legacy_behaviour:
A : Optional[Any] = []
A : int = [self.eos_token_id, self.cur_lang_code]
else:
A : int = [self.cur_lang_code]
A : List[str] = [self.eos_token_id]
A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
A : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
A : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCAmelCase ( self : Union[str, Any] , snake_case_ : str ):
"""simple docstring"""
A : Any = self.convert_tokens_to_ids(snake_case_ )
if self.legacy_behaviour:
A : int = []
A : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
A : str = [self.cur_lang_code]
A : List[str] = [self.eos_token_id]
A : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
A : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCAmelCase ( self : int , snake_case_ : str , snake_case_ : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(snake_case_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
A : Tuple = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
        return (out_vocab_file,)
| 256 |
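
# Hedged usage sketch for the fast NLLB tokenizer above; the checkpoint name comes from
# the pretrained map in this file, the sample text is illustrative, and the call
# downloads tokenizer files on first use.
from transformers import NllbTokenizerFast

tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
batch = tok("Hello world", return_tensors="pt")  # ids carry the eng_Latn language-code token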
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = 'markuplm'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256,
                 max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32,
                 max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 256 | 1 |
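
# Hedged sketch: constructing the config above and reading its XPath-specific fields.
from transformers import MarkupLMConfig

cfg = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
print(cfg.tag_pad_id, cfg.subs_pad_id)  # pad ids for the xpath tag / subscript sequences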
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase ( __magic_name__ ):
"""simple docstring"""
UpperCAmelCase_ : Any = ["image_processor", "tokenizer"]
UpperCAmelCase_ : Any = "ViTImageProcessor"
UpperCAmelCase_ : Tuple = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , lowercase__=None , lowercase__=None , **lowercase__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase__ , )
SCREAMING_SNAKE_CASE = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowercase__ , lowercase__ )
def __call__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ) -> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
SCREAMING_SNAKE_CASE = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if visual_prompt is not None:
SCREAMING_SNAKE_CASE = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if images is not None:
SCREAMING_SNAKE_CASE = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if visual_prompt is not None and images is not None:
SCREAMING_SNAKE_CASE = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
SCREAMING_SNAKE_CASE = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def A ( self , *lowercase__ , **lowercase__ ) -> List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def A ( self , *lowercase__ , **lowercase__ ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def A ( self ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase__ , )
return self.image_processor_class
@property
def A ( self ) -> Tuple:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase__ , )
return self.image_processor
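
# Hedged usage sketch, assuming the class above mirrors transformers' CLIPSegProcessor:
# exactly one of `text` or `visual_prompt` may be combined with `images`.
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")  # illustrative checkpoint
image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values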
| 406 |
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return []
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = int(max_value - min_value ) + 1
SCREAMING_SNAKE_CASE = [[] for _ in range(SCREAMING_SNAKE_CASE_ )]
for i in my_list:
buckets[int(i - min_value )].append(SCREAMING_SNAKE_CASE_ )
return [v for bucket in buckets for v in sorted(SCREAMING_SNAKE_CASE_ )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
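    # One extra illustrative check: int(i - min_value) also buckets float inputs sensibly.
    assert bucket_sort([0.5, 0.1, 0.4]) == [0.1, 0.4, 0.5]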
| 406 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : bool = False , ) -> Any:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Embedding(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = nn.Embedding(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = False
_UpperCAmelCase = nn.Dropout(p=lowerCamelCase )
_UpperCAmelCase = TaConfig(
vocab_size=lowerCamelCase , d_model=lowerCamelCase , num_heads=lowerCamelCase , d_kv=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase , feed_forward_proj=lowerCamelCase , is_decoder=lowerCamelCase , is_encoder_decoder=lowerCamelCase , )
_UpperCAmelCase = nn.ModuleList()
for lyr_num in range(lowerCamelCase ):
_UpperCAmelCase = TaBlock(lowerCamelCase )
self.encoders.append(lowerCamelCase )
_UpperCAmelCase = TaLayerNorm(lowerCamelCase )
_UpperCAmelCase = nn.Dropout(p=lowerCamelCase )
def lowerCamelCase ( self : List[str] , lowerCamelCase : int , lowerCamelCase : Tuple ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.token_embedder(lowerCamelCase )
_UpperCAmelCase = encoder_input_tokens.shape[1]
_UpperCAmelCase = torch.arange(lowerCamelCase , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCamelCase )
_UpperCAmelCase = self.dropout_pre(lowerCamelCase )
# inverted the attention mask
_UpperCAmelCase = encoder_input_tokens.size()
_UpperCAmelCase = self.get_extended_attention_mask(lowerCamelCase , lowerCamelCase )
for lyr in self.encoders:
_UpperCAmelCase = lyr(lowerCamelCase , lowerCamelCase )[0]
_UpperCAmelCase = self.layer_norm(lowerCamelCase )
        return self.dropout_post(lowerCamelCase ), encoder_inputs_mask
| 108 |
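
# Hedged sketch of the "inverted" attention mask get_extended_attention_mask produces:
# attendable positions become 0.0, masked positions become a large negative bias that
# is added to the attention scores before the softmax.
import torch

mask = torch.tensor([[1, 1, 0]])
extended = (1.0 - mask[:, None, None, :].float()) * torch.finfo(torch.float32).min
print(extended.shape)  # (batch, 1, 1, seq_len), broadcastable over attention scores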
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
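
# Hedged reduction of the optional-dependency guard used throughout this module: probe
# the import once, then fall back to dummy objects. The module name below is a stand-in.
try:
    import some_optional_backend  # noqa: F401 -- placeholder name, raises ImportError here
    _HAS_BACKEND = True
except ImportError:
    _HAS_BACKEND = False  # real code imports dummy objects that raise when actually used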
| 347 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = ["pixel_values"]
def __init__( self : Union[str, Any] ,_snake_case : bool = True ,_snake_case : Dict[str, int] = None ,_snake_case : PILImageResampling = PILImageResampling.BICUBIC ,_snake_case : bool = True ,_snake_case : Union[int, float] = 1 / 255 ,_snake_case : bool = True ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : bool = True ,**_snake_case : List[Any] ,) -> None:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : Tuple = size if size is not None else {'''height''': 384, '''width''': 384}
lowercase__ : Optional[int] = get_size_dict(_snake_case ,default_to_square=_snake_case )
lowercase__ : str = do_resize
lowercase__ : Tuple = size
lowercase__ : int = resample
lowercase__ : Dict = do_rescale
lowercase__ : str = rescale_factor
lowercase__ : Any = do_normalize
lowercase__ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__ : str = do_convert_rgb
def UpperCAmelCase ( self : Tuple ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : PILImageResampling = PILImageResampling.BICUBIC ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : List[Any] ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
lowercase__ : Optional[int] = (size['''height'''], size['''width'''])
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : str ,_snake_case : np.ndarray ,_snake_case : Union[int, float] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Any ,) -> Union[str, Any]:
"""simple docstring"""
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : List[Any] ,_snake_case : np.ndarray ,_snake_case : Union[float, List[float]] ,_snake_case : Union[float, List[float]] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Union[str, Any] ,) -> np.ndarray:
"""simple docstring"""
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : ImageInput ,_snake_case : Optional[bool] = None ,_snake_case : Optional[Dict[str, int]] = None ,_snake_case : PILImageResampling = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[float] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[str, TensorType]] = None ,_snake_case : bool = None ,_snake_case : ChannelDimension = ChannelDimension.FIRST ,**_snake_case : Union[str, Any] ,) -> PIL.Image.Image:
"""simple docstring"""
lowercase__ : str = do_resize if do_resize is not None else self.do_resize
lowercase__ : Optional[Any] = resample if resample is not None else self.resample
lowercase__ : Dict = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : Any = image_mean if image_mean is not None else self.image_mean
lowercase__ : List[str] = image_std if image_std is not None else self.image_std
lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ : Union[str, Any] = size if size is not None else self.size
lowercase__ : str = get_size_dict(_snake_case ,default_to_square=_snake_case )
lowercase__ : int = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ : Optional[Any] = [convert_to_rgb(_snake_case ) for image in images]
# All transformations expect numpy arrays.
lowercase__ : Optional[Any] = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
lowercase__ : Dict = [self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case ) for image in images]
if do_rescale:
lowercase__ : str = [self.rescale(image=_snake_case ,scale=_snake_case ) for image in images]
if do_normalize:
lowercase__ : Optional[Any] = [self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case ) for image in images]
lowercase__ : Any = [to_channel_dimension_format(_snake_case ,_snake_case ) for image in images]
lowercase__ : Union[str, Any] = BatchFeature(data={'''pixel_values''': images} ,tensor_type=_snake_case )
return encoded_outputs
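
# Hedged usage sketch for the image processor above, assuming BLIP-style defaults:
import numpy as np
from PIL import Image
from transformers import BlipImageProcessor

proc = BlipImageProcessor(size={"height": 384, "width": 384})
img = Image.fromarray(np.zeros((100, 120, 3), dtype=np.uint8))
out = proc(images=img, return_tensors="np")
print(out["pixel_values"].shape)  # (1, 3, 384, 384): resized, rescaled, normalized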
| 122 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
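    # Illustrative invocation of the script above (the script filename and all paths
    # are placeholders, not taken from the source):
    #   python convert_transfo_xl_checkpoint.py \
    #       --pytorch_dump_folder_path ./transfo-xl-pt \
    #       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
    #       --transfo_xl_config_file ./config.json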
| 122 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 / sqrt(2 ) ):
snake_case_ = tau * frequency / samplerate
snake_case_ = sin(SCREAMING_SNAKE_CASE__ )
snake_case_ = cos(SCREAMING_SNAKE_CASE__ )
snake_case_ = _sin / (2 * q_factor)
snake_case_ = (1 - _cos) / 2
snake_case_ = 1 - _cos
snake_case_ = 1 + alpha
snake_case_ = -2 * _cos
snake_case_ = 1 - alpha
snake_case_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 / sqrt(2 ) ):
snake_case_ = tau * frequency / samplerate
snake_case_ = sin(SCREAMING_SNAKE_CASE__ )
snake_case_ = cos(SCREAMING_SNAKE_CASE__ )
snake_case_ = _sin / (2 * q_factor)
snake_case_ = (1 + _cos) / 2
snake_case_ = -1 - _cos
snake_case_ = 1 + alpha
snake_case_ = -2 * _cos
snake_case_ = 1 - alpha
snake_case_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 / sqrt(2 ) ):
snake_case_ = tau * frequency / samplerate
snake_case_ = sin(SCREAMING_SNAKE_CASE__ )
snake_case_ = cos(SCREAMING_SNAKE_CASE__ )
snake_case_ = _sin / (2 * q_factor)
snake_case_ = _sin / 2
snake_case_ = 0
snake_case_ = -ba
snake_case_ = 1 + alpha
snake_case_ = -2 * _cos
snake_case_ = 1 - alpha
snake_case_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 / sqrt(2 ) ):
snake_case_ = tau * frequency / samplerate
snake_case_ = sin(SCREAMING_SNAKE_CASE__ )
snake_case_ = cos(SCREAMING_SNAKE_CASE__ )
snake_case_ = _sin / (2 * q_factor)
snake_case_ = 1 - alpha
snake_case_ = -2 * _cos
snake_case_ = 1 + alpha
snake_case_ = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 / sqrt(2 ) , ):
snake_case_ = tau * frequency / samplerate
snake_case_ = sin(SCREAMING_SNAKE_CASE__ )
snake_case_ = cos(SCREAMING_SNAKE_CASE__ )
snake_case_ = _sin / (2 * q_factor)
snake_case_ = 10 ** (gain_db / 40)
snake_case_ = 1 + alpha * big_a
snake_case_ = -2 * _cos
snake_case_ = 1 - alpha * big_a
snake_case_ = 1 + alpha / big_a
snake_case_ = -2 * _cos
snake_case_ = 1 - alpha / big_a
snake_case_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 / sqrt(2 ) , ):
snake_case_ = tau * frequency / samplerate
snake_case_ = sin(SCREAMING_SNAKE_CASE__ )
snake_case_ = cos(SCREAMING_SNAKE_CASE__ )
snake_case_ = _sin / (2 * q_factor)
snake_case_ = 10 ** (gain_db / 40)
snake_case_ = (big_a + 1) - (big_a - 1) * _cos
snake_case_ = (big_a + 1) + (big_a - 1) * _cos
snake_case_ = (big_a - 1) - (big_a + 1) * _cos
snake_case_ = (big_a - 1) + (big_a + 1) * _cos
snake_case_ = 2 * sqrt(SCREAMING_SNAKE_CASE__ ) * alpha
snake_case_ = big_a * (pmc + aaa)
snake_case_ = 2 * big_a * mpc
snake_case_ = big_a * (pmc - aaa)
snake_case_ = ppmc + aaa
snake_case_ = -2 * pmpc
snake_case_ = ppmc - aaa
snake_case_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 / sqrt(2 ) , ):
snake_case_ = tau * frequency / samplerate
snake_case_ = sin(SCREAMING_SNAKE_CASE__ )
snake_case_ = cos(SCREAMING_SNAKE_CASE__ )
snake_case_ = _sin / (2 * q_factor)
snake_case_ = 10 ** (gain_db / 40)
snake_case_ = (big_a + 1) - (big_a - 1) * _cos
snake_case_ = (big_a + 1) + (big_a - 1) * _cos
snake_case_ = (big_a - 1) - (big_a + 1) * _cos
snake_case_ = (big_a - 1) + (big_a + 1) * _cos
snake_case_ = 2 * sqrt(SCREAMING_SNAKE_CASE__ ) * alpha
snake_case_ = big_a * (ppmc + aaa)
snake_case_ = -2 * big_a * pmpc
snake_case_ = big_a * (ppmc - aaa)
snake_case_ = pmc + aaa
snake_case_ = 2 * mpc
snake_case_ = pmc - aaa
snake_case_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt
| 39 |
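
# A standalone numeric check of the low-pass design above, recomputing the same
# coefficients the first function assigns (sample frequency values are illustrative):
from math import cos, sin, sqrt, tau

frequency, samplerate, q_factor = 1_000, 48_000, 1 / sqrt(2)
w0 = tau * frequency / samplerate
alpha = sin(w0) / (2 * q_factor)
b1 = 1 - cos(w0)
b0 = b2 = b1 / 2
a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha
print([a0, a1, a2], [b0, b1, b2])  # the two coefficient lists handed to IIRFilter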
"""simple docstring"""
snake_case_ : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase_ ( _lowercase : bytes ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
UpperCAmelCase : int = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(_lowercase )
UpperCAmelCase : List[Any] = "".join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
UpperCAmelCase : Any = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCAmelCase : int = B"=" * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
UpperCAmelCase : List[Any] = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowercase ) , 6 ) ).encode()
+ padding
)
def lowercase_ ( _lowercase : str ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ) and not isinstance(_lowercase , _lowercase ):
UpperCAmelCase : Optional[Any] = (
"argument should be a bytes-like object or ASCII string, "
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase , _lowercase ):
try:
UpperCAmelCase : List[Any] = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
UpperCAmelCase : List[str] = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCAmelCase : List[str] = encoded_data[:-padding]
UpperCAmelCase : Any = "".join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCAmelCase : Any = "".join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
UpperCAmelCase : str = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowercase ) , 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
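    # Round-trip sanity check against the standard library (illustrative payload):
    import base64 as stdlib_base64

    payload = b"technical editor"
    assert base64_encode(payload) == stdlib_base64.b64encode(payload)
    assert base64_decode(base64_encode(payload).decode()) == payload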
| 595 | 0 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
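    # Hedged illustration of the vocab rewrite in the function above: BPE continuation
    # pieces drop their "@@" marker, full words past the first 13 special ids gain "</w>".
    demo_vocab = {"hel@@": 20, "lo": 21}
    demo_rewritten = {
        s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i
        for s, i in demo_vocab.items()
    }
    assert demo_rewritten == {"hel": 20, "lo</w>": 21}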
| 134 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a :
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=_lowerCamelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn='gelu' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=_lowerCamelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase_ ( self ):
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = inputs['prompt']
lowercase = inputs['generator']
lowercase = inputs['num_inference_steps']
lowercase = inputs['output_type']
if "image" in inputs:
lowercase = inputs['image']
else:
lowercase = None
if "mask_image" in inputs:
lowercase = inputs['mask_image']
else:
lowercase = None
if "original_image" in inputs:
lowercase = inputs['original_image']
else:
lowercase = None
lowercase , lowercase = pipe.encode_prompt(_lowerCamelCase )
# inputs with prompt converted to embeddings
lowercase = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
lowercase = image
if mask_image is not None:
lowercase = mask_image
if original_image is not None:
lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase = pipe(**_lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowerCamelCase )
lowercase = self.pipeline_class.from_pretrained(_lowerCamelCase )
pipe_loaded.to(_lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowerCamelCase , _lowerCamelCase ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = inputs['generator']
lowercase = inputs['num_inference_steps']
lowercase = inputs['output_type']
# inputs with prompt converted to embeddings
lowercase = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
lowercase = image
if mask_image is not None:
lowercase = mask_image
if original_image is not None:
lowercase = original_image
lowercase = pipe_loaded(**_lowerCamelCase )[0]
lowercase = np.abs(to_np(_lowerCamelCase ) - to_np(_lowerCamelCase ) ).max()
self.assertLess(_lowerCamelCase , 1e-4 )
def UpperCamelCase_ ( self ):
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = pipe(**_lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowerCamelCase )
lowercase = self.pipeline_class.from_pretrained(_lowerCamelCase )
pipe_loaded.to(_lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = pipe_loaded(**_lowerCamelCase )[0]
lowercase = np.abs(to_np(_lowerCamelCase ) - to_np(_lowerCamelCase ) ).max()
self.assertLess(_lowerCamelCase , 1e-4 )
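
# The save/reload equivalence pattern both tests above implement, reduced to a hedged
# sketch: run the pipeline, save_pretrained, from_pretrained, run again, compare outputs.
import numpy as np

def assert_reloaded_outputs_match(before, after, tol=1e-4):
    assert np.abs(np.asarray(before) - np.asarray(after)).max() < tol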
| 134 | 1 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE: list , SCREAMING_SNAKE_CASE: list , SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
_lowerCAmelCase = len(SCREAMING_SNAKE_CASE )
_lowerCAmelCase = [[0] * n for i in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = y_points[i]
for i in range(2 , SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
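    # Illustrative check: Neville's scheme reproduces a linear trend exactly.
    value, _table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    assert value == 10.0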
| 580 |
"""simple docstring"""
_snake_case = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
1_0: '''a''',
1_1: '''b''',
1_2: '''c''',
1_3: '''d''',
1_4: '''e''',
1_5: '''f''',
}
def __snake_case ( SCREAMING_SNAKE_CASE: float ):
"""simple docstring"""
assert type(SCREAMING_SNAKE_CASE ) in (int, float) and decimal == int(SCREAMING_SNAKE_CASE )
_lowerCAmelCase = int(SCREAMING_SNAKE_CASE )
_lowerCAmelCase = ''
_lowerCAmelCase = False
if decimal < 0:
_lowerCAmelCase = True
decimal *= -1
while decimal > 0:
_lowerCAmelCase , _lowerCAmelCase = divmod(SCREAMING_SNAKE_CASE , 16 )
_lowerCAmelCase = values[remainder] + hexadecimal
_lowerCAmelCase = '0x' + hexadecimal
if negative:
_lowerCAmelCase = '-' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
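    # Cross-check against Python's built-in hex() (illustrative values):
    assert decimal_to_hexadecimal(589) == hex(589)
    assert decimal_to_hexadecimal(-256) == hex(-256)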
| 580 | 1 |
"""simple docstring"""
import numpy as np
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1e-12 , SCREAMING_SNAKE_CASE_ = 100 , ):
assert np.shape(SCREAMING_SNAKE_CASE_ )[0] == np.shape(SCREAMING_SNAKE_CASE_ )[1]
# Ensure proper dimensionality.
assert np.shape(SCREAMING_SNAKE_CASE_ )[0] == np.shape(SCREAMING_SNAKE_CASE_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(SCREAMING_SNAKE_CASE_ ) == np.iscomplexobj(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Any = np.iscomplexobj(SCREAMING_SNAKE_CASE_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(SCREAMING_SNAKE_CASE_ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : str = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : Dict = 1e12
while not convergence:
# Multiple matrix by the vector.
_lowerCamelCase : Optional[Any] = np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Normalize the resulting output vector.
_lowerCamelCase : str = w / np.linalg.norm(SCREAMING_SNAKE_CASE_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_lowerCamelCase : List[Any] = vector.conj().T if is_complex else vector.T
_lowerCamelCase : Union[str, Any] = np.dot(SCREAMING_SNAKE_CASE_ , np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Check convergence.
_lowerCamelCase : Dict = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = lambda_
if is_complex:
_lowerCamelCase : Tuple = np.real(lambda_ )
return lambda_, vector
def UpperCamelCase ( ):
_lowerCamelCase : int = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowerCamelCase : Optional[Any] = np.array([41, 4, 20] )
_lowerCamelCase : Tuple = real_input_matrix.astype(np.complexaaa )
_lowerCamelCase : Optional[Any] = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowerCamelCase : Optional[Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowerCamelCase : Any = real_input_matrix
_lowerCamelCase : Tuple = real_vector
elif problem_type == "complex":
_lowerCamelCase : Tuple = complex_input_matrix
_lowerCamelCase : Dict = complex_vector
# Our implementation.
_lowerCamelCase : List[Any] = power_iteration(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_lowerCamelCase : List[str] = np.linalg.eigh(SCREAMING_SNAKE_CASE_ )
# Last eigenvalue is the maximum one.
_lowerCamelCase : int = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowerCamelCase : int = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(SCREAMING_SNAKE_CASE_ ) - np.abs(SCREAMING_SNAKE_CASE_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
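    # Standalone illustrative use: the dominant eigenvalue of a diagonal matrix.
    value, vector = power_iteration(np.diag([1.0, 3.0, 10.0]), np.ones(3))
    assert abs(value - 10.0) < 1e-6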
| 721 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCAmelCase ( a_ , a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = False , ) -> Tuple:
super().__init__()
_lowerCamelCase : Tuple = nn.Embedding(_lowercase , _lowercase )
_lowerCamelCase : Dict = nn.Embedding(_lowercase , _lowercase )
_lowerCamelCase : Tuple = False
_lowerCamelCase : Any = nn.Dropout(p=_lowercase )
_lowerCamelCase : List[Any] = TaConfig(
vocab_size=_lowercase , d_model=_lowercase , num_heads=_lowercase , d_kv=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , feed_forward_proj=_lowercase , is_decoder=_lowercase , is_encoder_decoder=_lowercase , )
_lowerCamelCase : List[Any] = nn.ModuleList()
for lyr_num in range(_lowercase ):
_lowerCamelCase : Tuple = TaBlock(_lowercase )
self.encoders.append(_lowercase )
_lowerCamelCase : str = TaLayerNorm(_lowercase )
_lowerCamelCase : List[Any] = nn.Dropout(p=_lowercase )
def a__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
_lowerCamelCase : List[Any] = self.token_embedder(_lowercase )
_lowerCamelCase : Union[str, Any] = encoder_input_tokens.shape[1]
_lowerCamelCase : int = torch.arange(_lowercase , device=encoder_input_tokens.device )
x += self.position_encoding(_lowercase )
_lowerCamelCase : Tuple = self.dropout_pre(_lowercase )
# inverted the attention mask
_lowerCamelCase : int = encoder_input_tokens.size()
_lowerCamelCase : Union[str, Any] = self.get_extended_attention_mask(_lowercase , _lowercase )
for lyr in self.encoders:
_lowerCamelCase : List[Any] = lyr(_lowercase , _lowercase )[0]
_lowerCamelCase : str = self.layer_norm(_lowercase )
return self.dropout_post(_lowercase ), encoder_inputs_mask
| 558 | 0 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 89 |
"""simple docstring"""
def string_contains_unique_chars(input_str: str) -> bool:
    """Return True if every character in `input_str` occurs at most once."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
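

# An equivalent set-based check, added for comparison: same O(n) time, but it
# stores up to n characters instead of one big-integer bitmap.
def string_contains_unique_chars_set(input_str: str) -> bool:
    return len(set(input_str)) == len(input_str)


assert string_contains_unique_chars_set("abcde") is True
assert string_contains_unique_chars_set("abcda") is False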
if __name__ == "__main__":
import doctest
doctest.testmod()
| 549 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 440 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 440 | 1 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
__magic_name__ : Any = input('Enter numbers separated by a comma:\n').strip()
__magic_name__ : int = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
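
# Quick sanity checks for patience_sort, in the spirit of the doctests used
# elsewhere in this collection.
assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]
assert patience_sort([]) == []
assert patience_sort([-3, -17, -48]) == [-48, -17, -3]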
| 281 |
"""simple docstring"""
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    # a valid IPv4 octet is in the range 0-255
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 281 | 1 |
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of all even Fibonacci numbers that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'''{solution() = }''')
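
# The even Fibonacci numbers also satisfy E(k) = 4 * E(k-1) + E(k-2) with
# E(1) = 2, E(2) = 8, which gives the same sum while skipping odd terms entirely.
def solution_even_recurrence(n: int = 4_000_000) -> int:
    prev, curr = 2, 8
    total = 2 if n >= 2 else 0
    while curr <= n:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total


assert solution_even_recurrence() == solution()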
| 704 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original implementation of the GELU activation from the Google BERT repo."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    """Smoother tanh approximation of GELU (https://arxiv.org/abs/1606.08415)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: splits `x` in half along `axis` and gates one half with the sigmoid of the other."""
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
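

# Minimal usage sketch: look an activation up by name and apply it to a tensor.
# act = get_tf_activation("gelu_new")
# y = act(tf.constant([-1.0, 0.0, 1.0]))  # smooth GELU applied element-wise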
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 615 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 615 | 1 |
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check whether a queen can be placed at (row, column) without being attacked."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
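

# Reference counts for the full search below: 2 solutions for n = 4, 4 for n = 6
# and 92 for n = 8, so the driver should print 92 boards and that total.
# Standalone check (kept commented out so it does not pollute the shared
# `solution` list used by the driver):
# solve([[0] * 4 for _ in range(4)], 0)
# assert len(solution) == 2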
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution)) | 98 |
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed=None) -> str:
    """Simulate the BB84 protocol and return the generated key as a bit string."""
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
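
# Illustrative property of the protocol above, assuming qiskit with the Aer
# simulator installed: a fixed seed makes the whole run deterministic.
# key_a = bb84(16, seed=0)
# key_b = bb84(16, seed=0)
# assert key_a == key_b and len(key_a) == 16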
if __name__ == "__main__":
print(f"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod() | 98 | 1 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Video classification pipeline using any model that can classify frame sequences."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 447 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
}
if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
else:
for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 447 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            config = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            config = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 114 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 86 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    """A pipeline for image super-resolution using latent diffusion."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 694 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 | 1 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Calculate beta = v/c, the fraction of the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor gamma = 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the Lorentz transformation matrix for a boost along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the Lorentz transformation to a four-vector `event`."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print('''Example of four vector: ''')
print(F"ct' = {four_vector[0]}")
print(F"x' = {four_vector[1]}")
print(F"y' = {four_vector[2]}")
print(F"z' = {four_vector[3]}")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"\n{numerical_vector}")
| 351 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 351 | 1 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of `sentence`, leaving the rest unchanged."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 704 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str = "cpu" , __lowerCamelCase : Union[str, None] = None ) -> None:
_snake_case = torch.load(__lowerCamelCase , map_location=__lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCamelCase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
_snake_case = v.half()
if save_path is None: # overwrite src_path
_snake_case = src_path
torch.save(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
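
# Example CLI invocation through fire (the script name and paths are illustrative):
#   python convert_model_to_fp16.py path/to/pytorch_model.bin --save_path model_fp16.bin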
| 430 | 0 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    """Mock download manager that replaces real downloads with local dummy data for tests."""

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it's a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    # this function has to be in the manager under this name so that testing works
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    # this function has to be in the manager under this name so that testing works
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    # this function has to be in the manager under this name so that testing works
    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    # this function has to be in the manager under this name so that testing works
    def extract(self, path, *args, **kwargs):
        return path

    # this function has to be in the manager under this name so that testing works
    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 250 |
def base16_encode(data: bytes) -> str:
    """Encode `data` as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back to bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
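# Usage sketch (assumes the function names restored above):
# >>> base16_encode(b"Hello World!")
# '48656C6C6F20576F726C6421'
# >>> base16_decode("48656C6C6F20576F726C6421")
# b'Hello World!'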
"""simple docstring"""
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
lowercase__ : Dict = name
lowercase__ : Union[str, Any] = val
def __str__( self ) -> Any:
return F'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self , lowerCamelCase__ ) -> List[Any]:
return self.val < other.val
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> Optional[Any]:
lowercase__ : str = {}
lowercase__ : Union[str, Any] = {}
lowercase__ : Any = self.build_heap(snake_case_ )
def __getitem__( self , lowerCamelCase__ ) -> Optional[Any]:
return self.get_value(snake_case_ )
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Union[str, Any]:
return (idx - 1) // 2
def UpperCAmelCase__( self , lowerCamelCase__ ) -> str:
return idx * 2 + 1
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Union[str, Any]:
return idx * 2 + 2
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[Any]:
return self.heap_dict[key]
def UpperCAmelCase__( self , lowerCamelCase__ ) -> str:
lowercase__ : List[Any] = len(snake_case_ ) - 1
lowercase__ : Any = self.get_parent_idx(snake_case_ )
for idx, i in enumerate(snake_case_ ):
lowercase__ : Any = idx
lowercase__ : List[Any] = i.val
for i in range(snake_case_ , -1 , -1 ):
self.sift_down(snake_case_ , snake_case_ )
return array
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
while True:
lowercase__ : Any = self.get_left_child_idx(snake_case_ ) # noqa: E741
lowercase__ : List[str] = self.get_right_child_idx(snake_case_ )
lowercase__ : Union[str, Any] = idx
if l < len(snake_case_ ) and array[l] < array[idx]:
lowercase__ : int = l
if r < len(snake_case_ ) and array[r] < array[smallest]:
lowercase__ : Dict = r
if smallest != idx:
lowercase__ , lowercase__ : int = array[smallest], array[idx]
(
(
lowercase__
) , (
lowercase__
) ,
) : List[str] = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
lowercase__ : Any = smallest
else:
break
def UpperCAmelCase__( self , lowerCamelCase__ ) -> str:
lowercase__ : List[str] = self.get_parent_idx(snake_case_ )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowercase__ , lowercase__ : Tuple = self.heap[idx], self.heap[p]
lowercase__ , lowercase__ : Union[str, Any] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowercase__ : List[Any] = p
lowercase__ : List[str] = self.get_parent_idx(snake_case_ )
def UpperCAmelCase__( self ) -> Dict:
return self.heap[0]
def UpperCAmelCase__( self ) -> int:
lowercase__ , lowercase__ : Optional[int] = self.heap[-1], self.heap[0]
lowercase__ , lowercase__ : Union[str, Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowercase__ : Optional[int] = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[int]:
self.heap.append(snake_case_ )
lowercase__ : Dict = len(self.heap ) - 1
lowercase__ : List[str] = node.val
self.sift_up(len(self.heap ) - 1 )
def UpperCAmelCase__( self ) -> Union[str, Any]:
return len(self.heap ) == 0
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowercase__ : str = new_value
lowercase__ : Optional[int] = new_value
self.sift_up(self.idx_of_element[node] )
__snake_case = Node('R', -1)
__snake_case = Node('B', 6)
__snake_case = Node('A', 3)
__snake_case = Node('X', 1)
__snake_case = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__snake_case = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod() | 720 |
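# Minimal usage sketch (assumes the class above): repeated `remove()` calls
# pop nodes in ascending `val` order, so after the decrease_key call the
# removal order is B (-17), R (-1), X (1), A (3), E (4).
# order = [my_min_heap.remove().name for _ in range(5)]
# print(order)  # expected: ['B', 'R', 'X', 'A', 'E']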
"""simple docstring"""
from math import sqrt
def _lowerCamelCase ( lowerCamelCase__ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCamelCase ( lowerCamelCase__ : int = 1_00_01 ):
lowercase__ : List[str] = 0
lowercase__ : Optional[Any] = 1
while count != nth and number < 3:
number += 1
if is_prime(lowerCamelCase__ ):
count += 1
while count != nth:
number += 2
if is_prime(lowerCamelCase__ ):
count += 1
return number
if __name__ == "__main__":
print(F"{solution() = }") | 128 | 0 |
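# Sanity-check sketch: the first six primes are 2, 3, 5, 7, 11, 13, so
# solution(6) == 13; the default call returns the 10001st prime.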
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
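# Usage sketch (assumes the restored names above):
# print(password_generator(12))          # a random 12-character string
# print(is_strong_password("Ab1!Ab1!"))  # True: upper, lower, digit and symbol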
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    backend = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, backend, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
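# Expected histogram sketch: for inputs (1, 1) the XOR (sum) bit is 0 and the
# AND (carry) bit is 1, so with 1000 shots essentially all counts land on the
# bitstring "10" (carry, sum).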
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
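# Usage sketch (assumes the restored class name above):
# config = SegformerConfig(num_encoder_blocks=4, hidden_sizes=[32, 64, 160, 256])
# print(config.num_attention_heads)  # [1, 2, 5, 8], per-stage attention heads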
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # the mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
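# Usage sketch (requires a real SentencePiece model file; the names above are
# restored from the method bodies):
# tokenizer = BarthezTokenizer("sentencepiece.bpe.model")
# ids = tokenizer("Bonjour le monde")["input_ids"]  # wrapped in <s> ... </s>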
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
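# Usage sketch: column_mapping renames dataset columns onto the schema above.
# Summarization(text_column="article", summary_column="highlights").column_mapping
# returns {"article": "text", "highlights": "summary"}.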
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    """Resize, center-crop, rescale, and normalize images for vision models."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
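# Usage sketch (the class name above is a restored placeholder, not a known
# transformers export):
# from PIL import Image
# processor = SimpleImageProcessor()
# batch = processor(Image.open("cat.jpg"), return_tensors="pt")
# print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + crop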
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
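# Worked example: for the default 4x4 matrix, rotate_90 turns the last column
# (4, 8, 12, 16) into the first row:
# rotate_90(make_matrix()) == [[4, 8, 12, 16], [3, 7, 11, 15],
#                              [2, 6, 10, 14], [1, 5, 9, 13]]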
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
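# Usage sketch (illustrative values, not physical measurements): pass exactly
# one argument as 0 and the function solves for that quantity, e.g.
# casimir_force(force=0, area=4.0, distance=0.03) returns {"force": ...}.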
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        VideoMAEForPreTraining,
        VideoMAEForVideoClassification,
        VideoMAEModel,
    )
    from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
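# Shape arithmetic for the tester above: num_patches_per_frame = (10 // 2) ** 2
# = 25, seq_length = (2 // 2) * 25 = 25, and with mask_ratio = 0.9 the number
# of masked patches is int(0.9 * 25) = 22, leaving 3 visible tokens.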
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
"""simple docstring"""
def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
__magic_name__ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowercase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
__magic_name__ = 0
while b > 0:
if b & 1:
__magic_name__ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
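# Worked example: multiply(5, 7) accumulates 5 + 10 + 20 = 35 from the set
# bits of 7 (0b111); multiply_mod(5, 7, 6) keeps every step reduced mod 6 and
# returns 35 % 6 = 5. (The function names above are restored descriptively.)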
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def catalan_numbers(upper_limit: int) -> "list[int]":
    """
    Return the Catalan number sequence from 0 through `upper_limit`.

    >>> catalan_numbers(5)
    [1, 1, 2, 5, 14, 42]
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
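# Quick check: catalan_numbers(5) == [1, 1, 2, 5, 14, 42]; C(i) counts, among
# other things, the balanced bracket sequences with i pairs.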
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 491 |
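Usage sketch for the rail fence row above: encrypting and decrypting with the same height must round-trip, and `bruteforce` recovers the plaintext at the correct key guess:

ciphertext = encrypt("WE ARE DISCOVERED. FLEE AT ONCE", 3)
assert decrypt(ciphertext, 3) == "WE ARE DISCOVERED. FLEE AT ONCE"
print(bruteforce(ciphertext)[3])  # the correct key guess recovers the plaintext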
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
| 337 | 0 |
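In the pipeline row above, `get_timesteps` keeps only the tail of the schedule, so `strength` controls how much of the denoising trajectory is actually run. A worked example of that arithmetic (plain Python, no pipeline state assumed):

num_inference_steps = 50
strength = 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)  # 10
# the loop then runs over timesteps[10:], i.e. 40 of the 50 scheduled steps
print(init_timestep, t_start, num_inference_steps - t_start)  # 40 10 40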
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
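Example of what the two helpers above produce and accept, using hypothetical device names:

device_map = get_device_map(n_layers=12, devices=["cuda:0", "cuda:1"])
print(device_map)  # {'cuda:0': [0, 1, 2, 3, 4, 5], 'cuda:1': [6, 7, 8, 9, 10, 11]}
assert_device_map(device_map, num_blocks=12)  # raises ValueError on gaps or duplicates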
| 636 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any]=False , lowercase__ : str=False , lowercase__ : Dict=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _a ( lowercase__ : List[str] , lowercase__ : Dict ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ : Dict = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_bias[-config.hidden_size :]
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ['head.weight', 'head.bias']
for k in ignore_keys:
        state_dict.pop(k , None )
def _a ( lowercase__ : int , lowercase__ : int , lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = dct.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = val
@torch.no_grad()
def _a ( lowercase__ : Dict , lowercase__ : Optional[Any] ):
'''simple docstring'''
    config = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=False )
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = False
if "vqa" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : str = 31_29
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : int = 'vqa2-id2label.json'
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = idalabel
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[str] = ViltForQuestionAnswering(lowercase__ )
elif "nlvr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Dict = {0: 'False', 1: 'True'}
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in config.idalabel.items()}
SCREAMING_SNAKE_CASE__ : Tuple = 3
SCREAMING_SNAKE_CASE__ : int = ViltForImagesAndTextClassification(lowercase__ )
elif "irtr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : str = ViltForImageAndTextRetrieval(lowercase__ )
elif "mlm_itm" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Optional[int] = ViltForMaskedLM(lowercase__ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ : Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['state_dict']
SCREAMING_SNAKE_CASE__ : Any = create_rename_keys(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ )
if mlm_model or irtr_model:
SCREAMING_SNAKE_CASE__ : Any = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
            state_dict.pop(k , None )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowercase__ )
# Define processor
SCREAMING_SNAKE_CASE__ : str = ViltImageProcessor(size=3_84 )
SCREAMING_SNAKE_CASE__ : List[Any] = BertTokenizer.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE__ : List[Any] = ViltProcessor(lowercase__ , lowercase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Any = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Tuple = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[str] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowercase__ ).raw )
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'a bunch of [MASK] laying on a [MASK].'
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'How many cats are there?'
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = model(**lowercase__ )
# Verify outputs
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size([1, 11, 3_05_22] )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1E-4 )
# verify masked token prediction equals "cats"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 31_29] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
# verify vqa prediction equals "2"
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size([1, 2] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([-2.8721, 2.1291] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 636 | 1 |
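The conversion script above is mostly state-dict key surgery. A minimal sketch of that renaming pattern, with made-up keys (the real pairs come from `create_rename_keys`):

import torch


def rename_state_dict(state_dict, rename_pairs):
    # pop each old key and reinsert its tensor under the new key
    for old_key, new_key in rename_pairs:
        if old_key in state_dict:
            state_dict[new_key] = state_dict.pop(old_key)
    return state_dict


sd = {"transformer.norm.weight": torch.ones(3)}
sd = rename_state_dict(sd, [("transformer.norm.weight", "vilt.layernorm.weight")])
print(sorted(sd))  # ['vilt.layernorm.weight']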
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False
def UpperCamelCase_ ( self : Optional[int] ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _UpperCamelCase = pipe('''anime turtle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 10 |
import numpy as np


class Cell:
    """
    A single cell in the grid world. Each cell stores its position, the parent
    cell it was reached from, and the A* bookkeeping values g, h and f.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """
    The external world: a world_size[0] x world_size[1] grid.
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """
        Return the in-bounds neighbours of cell (8-connectivity).
        """
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """
    A* search from start to goal. Cells already expanded are skipped, and a
    neighbour is only queued when no equal-or-better copy is already open.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open.pop(int(min_f))
        _closed.append(current)
        if current == goal:
            break
        for n in world.get_neighbours(current):
            if n in _closed:
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 669 | 0 |
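The row above scans the open list with `np.argmin`; a heap-based open set is the more common formulation. A sketch under the same squared-distance heuristic (which is not admissible, so neither version guarantees a shortest path):

import heapq


def astar_heap(grid_size, start, goal):
    def h(p):
        return (p[0] - goal[0]) ** 2 + (p[1] - goal[1]) ** 2

    frontier = [(h(start), 0, start)]  # entries are (f, g, position)
    parents = {start: None}
    best_g = {start: 0}
    while frontier:
        _f, g, pos = heapq.heappop(frontier)
        if pos == goal:
            path = []
            while pos is not None:
                path.append(pos)
                pos = parents[pos]
            return path[::-1]
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                nxt = (pos[0] + dx, pos[1] + dy)
                if (dx, dy) == (0, 0) or not (0 <= nxt[0] < grid_size and 0 <= nxt[1] < grid_size):
                    continue
                if g + 1 < best_g.get(nxt, float("inf")):
                    best_g[nxt] = g + 1
                    parents[nxt] = pos
                    heapq.heappush(frontier, (g + 1 + h(nxt), g + 1, nxt))
    return []


print(astar_heap(5, (0, 0), (4, 4)))  # e.g. [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]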
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 687 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 687 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = "blip_2_vision_model"
def __init__( self : Any , A__ : Tuple=1_4_0_8 , A__ : Optional[int]=6_1_4_4 , A__ : Any=3_9 , A__ : Optional[int]=1_6 , A__ : Union[str, Any]=2_2_4 , A__ : Tuple=1_4 , A__ : Union[str, Any]="gelu" , A__ : int=0.00_001 , A__ : Any=0.0 , A__ : Union[str, Any]=1E-10 , A__ : List[Any]=True , **A__ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
a__ : Optional[Any] = hidden_size
a__ : Optional[int] = intermediate_size
a__ : str = num_hidden_layers
a__ : List[str] = num_attention_heads
a__ : List[Any] = patch_size
a__ : Optional[int] = image_size
a__ : Any = initializer_range
a__ : Optional[Any] = attention_dropout
a__ : Any = layer_norm_eps
a__ : Dict = hidden_act
a__ : str = qkv_bias
@classmethod
def __lowerCAmelCase ( cls : List[str] , A__ : Union[str, os.PathLike] , **A__ : int ) -> List[Any]:
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
a__ , a__ : Optional[Any] = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
a__ : Any = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = "blip_2_qformer"
def __init__( self : Union[str, Any] , A__ : int=3_0_5_2_2 , A__ : List[str]=7_6_8 , A__ : Union[str, Any]=1_2 , A__ : Tuple=1_2 , A__ : Any=3_0_7_2 , A__ : Optional[Any]="gelu" , A__ : List[Any]=0.1 , A__ : Any=0.1 , A__ : Optional[Any]=5_1_2 , A__ : Dict=0.02 , A__ : Optional[int]=1E-12 , A__ : Union[str, Any]=0 , A__ : List[str]="absolute" , A__ : Dict=2 , A__ : Tuple=1_4_0_8 , **A__ : Any , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase )
a__ : List[Any] = vocab_size
a__ : List[Any] = hidden_size
a__ : int = num_hidden_layers
a__ : List[Any] = num_attention_heads
a__ : Optional[Any] = hidden_act
a__ : Optional[Any] = intermediate_size
a__ : List[Any] = hidden_dropout_prob
a__ : Optional[int] = attention_probs_dropout_prob
a__ : Tuple = max_position_embeddings
a__ : List[str] = initializer_range
a__ : Optional[int] = layer_norm_eps
a__ : str = position_embedding_type
a__ : int = cross_attention_frequency
a__ : int = encoder_hidden_size
@classmethod
def __lowerCAmelCase ( cls : Any , A__ : Union[str, os.PathLike] , **A__ : Optional[int] ) -> str:
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
a__ , a__ : Any = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
a__ : Optional[Any] = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = "blip-2"
__UpperCamelCase = True
def __init__( self : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Dict=None , A__ : Dict=3_2 , **A__ : str ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
if vision_config is None:
a__ : str = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
a__ : str = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
a__ : Any = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
a__ : Tuple = BlipaVisionConfig(**_lowerCamelCase )
a__ : int = BlipaQFormerConfig(**_lowerCamelCase )
a__ : List[Any] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
a__ : Any = CONFIG_MAPPING[text_model_type](**_lowerCamelCase )
a__ : Tuple = self.text_config.tie_word_embeddings
a__ : List[Any] = self.text_config.is_encoder_decoder
a__ : List[Any] = num_query_tokens
a__ : List[str] = self.vision_config.hidden_size
a__ : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
a__ : List[str] = 1.0
a__ : Any = 0.02
@classmethod
def __lowerCAmelCase ( cls : int , A__ : BlipaVisionConfig , A__ : BlipaQFormerConfig , A__ : PretrainedConfig , **A__ : Optional[Any] , ) -> Dict:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_lowerCamelCase , )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[Any] = copy.deepcopy(self.__dict__ )
a__ : Any = self.vision_config.to_dict()
a__ : str = self.qformer_config.to_dict()
a__ : Tuple = self.text_config.to_dict()
a__ : Any = self.__class__.model_type
return output
| 688 |
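The `from_pretrained`-style classmethods in the row above share one trick: when handed a full composite config dict, they keep only their own nested section before calling `from_dict`. A standalone sketch of that extraction (the dict values are class defaults, not those of any released checkpoint):

config_dict = {
    "model_type": "blip-2",
    "vision_config": {"hidden_size": 1408},
    "qformer_config": {"hidden_size": 768},
    "text_config": {"model_type": "opt"},
}
if config_dict.get("model_type") == "blip-2":
    config_dict = config_dict["vision_config"]  # a vision config keeps only its section
print(config_dict)  # {'hidden_size': 1408}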
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 571 | 0 |
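Sanity check for the gradient used in `logistic_reg` above: the analytic gradient X^T (h - y) / n should match a central finite difference of the cost. A small numerical sketch (the `_chk` names are my own):

import numpy as np

rng = np.random.default_rng(0)
x_chk = rng.normal(size=(20, 2))
y_chk = (rng.random(20) > 0.5).astype(float)
theta_chk = rng.normal(size=2)


def cost_at(t):
    h = 1 / (1 + np.exp(-x_chk @ t))
    return (-y_chk * np.log(h) - (1 - y_chk) * np.log(1 - h)).mean()


analytic = x_chk.T @ (1 / (1 + np.exp(-x_chk @ theta_chk)) - y_chk) / y_chk.size
eps = 1e-6
numeric = np.array([(cost_at(theta_chk + eps * e) - cost_at(theta_chk - eps * e)) / (2 * eps) for e in np.eye(2)])
assert np.allclose(analytic, numeric, atol=1e-6)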
'''simple docstring'''
import re
import subprocess
import sys
_lowercase : Union[str, Any] =subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
_lowercase : Union[str, Any] =subprocess.check_output(F"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
_lowercase : List[str] ="|".join(sys.argv[1:])
_lowercase : List[str] =re.compile(RF"^({joined_dirs}).*?\.py$")
_lowercase : Union[str, Any] =[x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str =["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
_lowercase : Any =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 574 | 0 |