def solution(limit: int = 50_000_000) -> int:
    """
    Count the numbers below `limit` expressible as the sum of a prime square,
    a prime cube, and a prime fourth power (Project Euler 87).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Iterate in ascending order so the early-exit `break`s below are sound.
    sorted_primes = sorted(primes)
    for prime_1 in sorted_primes:
        square = prime_1 * prime_1
        for prime_2 in sorted_primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in sorted_primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
# Tests for the zero-shot audio classification pipeline (CLAP).
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        # (label spelling "vaccum" kept verbatim from the original test data)
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
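# Usage sketch added for illustration (not part of the original test file);
# the model name mirrors the slow test above, the labels are arbitrary:
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     # `audio` is a 1-D numpy array of raw samples, e.g. from datasets' Audio feature
#     print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))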
# A pure-Python Matrix class with determinant, inverse, and arithmetic operators.
from __future__ import annotations


class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            # Note: scalar products are truncated to int, as in the original.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
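    # Small worked example, added for illustration:
    m = Matrix([[1, 2], [3, 4]])
    assert m.determinant() == -2            # 1*4 - 2*3
    assert (m + m).rows == [[2, 4], [6, 8]]
    assert m.identity().rows == [[1, 0], [0, 1]]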
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            # One image with a shared set of candidate labels.
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            # A list of {"image": ..., "candidate_labels": ...} dicts.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
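# Usage sketch added for illustration; "google/owlvit-base-patch32" and the
# COCO image URL are the usual documentation examples, not part of this file:
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )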
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """
    Return True if `ip_v4_address` consists of exactly four dot-separated
    decimal octets, each in the range 0-255.
    """
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    # 255 is the largest value an octet can take; the version this was copied
    # from used 254, which wrongly rejects e.g. "255.255.255.255".
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Any = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
# set absolute/relative position embeddings parameter
_UpperCAmelCase : Optional[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : int = True
# hparam_utils.py hparams
_UpperCAmelCase : Optional[int] = 0.664_694
_UpperCAmelCase : Any = 0.207_951
_UpperCAmelCase : Any = 0.121_194
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : Dict = True
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : List[Any] = 0.0_352_513
_UpperCAmelCase : Union[str, Any] = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : Optional[int] = False
# hparam_utils.py hparams
_UpperCAmelCase : List[Any] = 36.4_519
_UpperCAmelCase : Union[str, Any] = 0.903_421
_UpperCAmelCase : List[Any] = 222.088
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : str = True
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : str = 0.763_141
_UpperCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
elif task == "TABFACT":
_UpperCAmelCase : int = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
elif task == "MLM":
_UpperCAmelCase : List[str] = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase : Optional[Any] = TapasModel(config=SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f'Task {task} not supported.' )
print(f'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model (weights and configuration)
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Save tokenizer files
print(f'Save tokenizer files to {pytorch_dump_path}' )
_UpperCAmelCase : Optional[int] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
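# Example invocation (the script name and paths are illustrative; the flags
# come from the argparse definitions above):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt-0 \
#       --tapas_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output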
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# The model-specific class name was lost in this dump; `ImageProcessor` is a
# stand-in for the original `<ModelName>ImageProcessor`.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
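# Usage sketch (my addition; `ImageProcessor` is the placeholder name above):
#
#     import numpy as np
#
#     processor = ImageProcessor()
#     image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     batch = processor.preprocess(image, return_tensors="np")
#     # Resize to shortest edge 256, center-crop to 224x224, channels first:
#     print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224)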
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
Wav2Vec2Config,
Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPT2Config,
TFGPT2LMHeadModel,
GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
T5Config,
TFT5ForConditionalGeneration,
T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
Wav2Vec2Config,
TFWav2Vec2Model,
Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        # Tuple arity mirrors the original entries in MODEL_CLASSES.
        config_class, model_classes, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
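# Example invocation (the script name and paths are illustrative; the flags
# come from the argparse definitions above):
#   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#       --pytorch_checkpoint_path ./bert/pytorch_model.bin \
#       --config_file ./bert/config.json --tf_dump_path ./bert-tf/ \
#       --compare_with_pt_model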
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Return the smallest cuboid size M such that more than `limit` distinct
    cuboids with integer dimensions up to M admit an integer-length shortest
    path between opposite corners (Project Euler 86).
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = DownBlockaD # noqa F405
snake_case = "down"
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
A_ = [-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = ResnetDownsampleBlockaD # noqa F405
snake_case = "down"
def lowerCamelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
A_ = [0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = AttnDownBlockaD # noqa F405
snake_case = "down"
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A_ = [0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = CrossAttnDownBlockaD # noqa F405
snake_case = "down"
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
def lowerCamelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
A_ = [0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = SimpleCrossAttnDownBlockaD # noqa F405
snake_case = "down"
@property
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=_snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
A_ = [0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = SkipDownBlockaD # noqa F405
snake_case = "down"
@property
def lowerCamelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=_snake_case )
def lowerCamelCase__ ( self : str ) -> int:
"""simple docstring"""
A_ = [-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = AttnSkipDownBlockaD # noqa F405
snake_case = "down"
@property
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=_snake_case )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
A_ = [0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = DownEncoderBlockaD # noqa F405
snake_case = "down"
@property
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=_snake_case )
def lowerCamelCase__ ( self : int ) -> Dict:
"""simple docstring"""
A_ = {
"in_channels": 32,
"out_channels": 32,
}
A_ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : int ) -> Any:
"""simple docstring"""
A_ = [1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = AttnDownEncoderBlockaD # noqa F405
snake_case = "down"
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_temb=_snake_case )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
A_ = {
"in_channels": 32,
"out_channels": 32,
}
A_ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
A_ = [0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = UNetMidBlockaD # noqa F405
snake_case = "mid"
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
A_ = {
"in_channels": 32,
"temb_channels": 128,
}
A_ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
A_ = [-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = UNetMidBlockaDCrossAttn # noqa F405
snake_case = "mid"
def lowerCamelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
def lowerCamelCase__ ( self : int ) -> str:
"""simple docstring"""
A_ = [0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = UNetMidBlockaDSimpleCrossAttn # noqa F405
snake_case = "mid"
@property
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=_snake_case )
def lowerCamelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
def lowerCamelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
A_ = [0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = UpBlockaD # noqa F405
snake_case = "up"
@property
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def lowerCamelCase__ ( self : int ) -> int:
"""simple docstring"""
A_ = [-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = ResnetUpsampleBlockaD # noqa F405
snake_case = "up"
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
A_ = [0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = CrossAttnUpBlockaD # noqa F405
snake_case = "up"
@property
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
def lowerCamelCase__ ( self : str ) -> Any:
"""simple docstring"""
A_ = [-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = SimpleCrossAttnUpBlockaD # noqa F405
snake_case = "up"
@property
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case , include_encoder_hidden_states=_snake_case )
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
A_ = [0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = AttnUpBlockaD # noqa F405
snake_case = "up"
@property
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
A_ = [0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = SkipUpBlockaD # noqa F405
snake_case = "up"
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def lowerCamelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
A_ = [-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = AttnSkipUpBlockaD # noqa F405
snake_case = "up"
@property
def lowerCamelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A_ = [0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = UpDecoderBlockaD # noqa F405
snake_case = "up"
@property
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_temb=_snake_case )
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
A_ = {"in_channels": 32, "out_channels": 32}
A_ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
A_ = [0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7]
super().test_output(_snake_case )
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = AttnUpDecoderBlockaD # noqa F405
snake_case = "up"
@property
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=_snake_case )
def lowerCamelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
A_ = {"in_channels": 32, "out_channels": 32}
A_ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : int ) -> int:
"""simple docstring"""
A_ = [0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8]
super().test_output(_snake_case )
"""simple docstring"""
def A_ (__a , __a , __a ):
'''simple docstring'''
A_ = len(__a )
A_ = [[0] * n for i in range(__a )]
for i in range(__a ):
A_ = y_points[i]
for i in range(2 , __a ):
for j in range(__a , __a ):
A_ = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
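    # Worked example (my addition): five collinear points (y = x + 5), so the
    # interpolated value at x0 = 5 must be 10.0.
    print(neville_interpolate((1, 2, 3, 4, 6), (6, 7, 8, 9, 11), 5)[0])  # 10.0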
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase :
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=16 , _lowerCamelCase=[32, 64, 128] , _lowerCamelCase=[1, 2, 1] , _lowerCamelCase=[2, 2, 4] , _lowerCamelCase=2 , _lowerCamelCase=2.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.02 , _lowerCamelCase=1E-5 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=10 , _lowerCamelCase=8 , _lowerCamelCase=["stage1", "stage2"] , _lowerCamelCase=[1, 2] , ):
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = hidden_sizes
lowerCAmelCase_ = depths
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = window_size
lowerCAmelCase_ = mlp_ratio
lowerCAmelCase_ = qkv_bias
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = use_absolute_embeddings
lowerCAmelCase_ = patch_norm
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = is_training
lowerCAmelCase_ = scope
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = encoder_stride
lowerCAmelCase_ = out_features
lowerCAmelCase_ = out_indices
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = FocalNetModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase )
lowerCAmelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__A : Tuple = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
__A : Any = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
__A : List[str] = False
__A : Optional[Any] = False
__A : Union[str, Any] = False
__A : List[str] = False
__A : Optional[Any] = False
    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def UpperCAmelCase_ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
return
    def UpperCAmelCase_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def UpperCAmelCase_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def UpperCAmelCase_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def UpperCAmelCase_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def UpperCAmelCase_ ( self ):
pass
    def UpperCAmelCase_ ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def UpperCAmelCase_ ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def UpperCAmelCase_ ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict['''output_hidden_states'''] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def UpperCAmelCase_ ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict['''output_hidden_states'''] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@slow
def UpperCAmelCase_ ( self ):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def UpperCAmelCase_ ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        # TODO update organization
        return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
    def UpperCAmelCase_ ( self ):
        model = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __UpperCAmelCase ( BackboneTesterMixin , unittest.TestCase ):
__A : Dict = (FocalNetBackbone,) if is_torch_available() else ()
__A : List[str] = FocalNetConfig
__A : Optional[int] = False
    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
| 274 | '''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)
def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 274 | 1 |
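The recursive generator above prints every subsequence of the input. As an illustrative cross-check (not part of the original file; the function and variable names below are made up), the same power set can be enumerated iteratively with bitmasks:

from typing import Any


def subsequences_by_bitmask(sequence: list[Any]) -> list[list[Any]]:
    # bit i of `mask` decides whether sequence[i] is kept in that subsequence
    return [
        [item for i, item in enumerate(sequence) if mask >> i & 1]
        for mask in range(1 << len(sequence))
    ]


assert len(subsequences_by_bitmask([3, 1, 2, 4])) == 2**4  # 16 subsequences, same count as the recursive version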
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE_['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE_, module_spec=__spec__)
| 704 | '''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 6_0))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("""equal""" )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 466 | 0 |
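A small sanity check for the construction above (illustrative only; it assumes the corrected function names): each Koch iteration replaces every segment with four segments, so the initial triangle's 3 segments become 3 * 4**n after n steps, i.e. 3 * 4**n + 1 vectors in the list.

for n in range(4):
    assert len(iterate(INITIAL_VECTORS, n)) == 3 * 4**n + 1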
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput( BaseOutput ):
    sample : jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel( nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size : int = 32
    in_channels : int = 4
    out_channels : int = 4
    down_block_types : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention : Union[bool, Tuple[bool]] = False
    block_out_channels : Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block : int = 2
    attention_head_dim : Union[int, Tuple[int]] = 8
    num_attention_heads : Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim : int = 1280
    dropout : float = 0.0
    use_linear_projection : bool = False
    dtype : jnp.dtype = jnp.float32
    flip_sin_to_cos : bool = True
    freq_shift : int = 0
    use_memory_efficient_attention : bool = False
    def init_weights( self , rng : jax.random.KeyArray ) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"""params""": params_rng, """dropout""": dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup( self ):
__A : Union[str, Any] = self.block_out_channels
__A : Optional[Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__A : Optional[int] = self.num_attention_heads or self.attention_head_dim
# input
__A : str = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__A : str = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__A : List[Any] = FlaxTimestepEmbedding(__A , dtype=self.dtype )
__A : Optional[int] = self.only_cross_attention
if isinstance(__A , __A ):
__A : int = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__A , __A ):
__A : int = (num_attention_heads,) * len(self.down_block_types )
# down
__A : Dict = []
__A : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__A : Optional[int] = output_channel
__A : Union[str, Any] = block_out_channels[i]
__A : Dict = i == len(__A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
                __A : Any = FlaxCrossAttnDownBlock2D(
in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
                __A : List[Any] = FlaxDownBlock2D(
in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__A )
__A : Tuple = down_blocks
# mid
        __A : Any = FlaxUNetMidBlock2DCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__A : Optional[Any] = []
__A : Tuple = list(reversed(__A ) )
__A : Union[str, Any] = list(reversed(__A ) )
__A : List[str] = list(reversed(__A ) )
__A : Union[str, Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__A : List[Any] = output_channel
__A : Any = reversed_block_out_channels[i]
__A : str = reversed_block_out_channels[min(i + 1 , len(__A ) - 1 )]
__A : Union[str, Any] = i == len(__A ) - 1
if up_block_type == "CrossAttnUpBlock2D":
                __A : Any = FlaxCrossAttnUpBlock2D(
in_channels=__A , out_channels=__A , prev_output_channel=__A , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
                __A : Optional[int] = FlaxUpBlock2D(
in_channels=__A , out_channels=__A , prev_output_channel=__A , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__A )
__A : str = output_channel
__A : Optional[Any] = up_blocks
# out
__A : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__A : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[Any] , __A : Dict , __A : int , __A : List[str] , __A : List[Any]=None , __A : Any=None , __A : bool = True , __A : bool = False , ):
# 1. time
if not isinstance(__A , jnp.ndarray ):
            __A : Tuple = jnp.array([timesteps] , dtype=jnp.int32 )
elif isinstance(__A , jnp.ndarray ) and len(timesteps.shape ) == 0:
            __A : int = timesteps.astype(dtype=jnp.float32 )
__A : Union[str, Any] = jnp.expand_dims(__A , 0 )
__A : List[str] = self.time_proj(__A )
__A : str = self.time_embedding(__A )
# 2. pre-process
__A : List[str] = jnp.transpose(__A , (0, 2, 3, 1) )
__A : str = self.conv_in(__A )
# 3. down
__A : Any = (sample,)
for down_block in self.down_blocks:
if isinstance(__A , __A ):
__A , __A : Optional[int] = down_block(__A , __A , __A , deterministic=not train )
else:
__A , __A : List[str] = down_block(__A , __A , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__A : Tuple = ()
for down_block_res_sample, down_block_additional_residual in zip(
__A , __A ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__A : str = new_down_block_res_samples
# 4. mid
__A : Any = self.mid_block(__A , __A , __A , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__A : int = down_block_res_samples[-(self.layers_per_block + 1) :]
__A : Tuple = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__A , __A ):
__A : Optional[int] = up_block(
__A , temb=__A , encoder_hidden_states=__A , res_hidden_states_tuple=__A , deterministic=not train , )
else:
__A : int = up_block(__A , temb=__A , res_hidden_states_tuple=__A , deterministic=not train )
# 6. post-process
__A : Optional[Any] = self.conv_norm_out(__A )
__A : Optional[Any] = nn.silu(__A )
__A : Dict = self.conv_out(__A )
__A : str = jnp.transpose(__A , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
        return FlaxUNet2DConditionOutput(sample=__A )
| 17 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status( target_runners , token ):
    offline_runners = []
    cmd = (
        f'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE )
    o = output.stdout.decode("""utf-8""" )
    status = json.loads(o )
    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("""offline_runners.txt""", """w""" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners] )
        raise ValueError(f'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
    def list_str( values ):
        return values.split(""",""" )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 238 | 0 |
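The script above shells out to the GitHub API; its filtering step can be demonstrated offline with a hand-written payload (the runner names below are made up, not real infrastructure):

import json

sample_payload = json.dumps(
    {
        "runners": [
            {"name": "runner-a", "status": "online"},
            {"name": "runner-b", "status": "offline"},
            {"name": "runner-c", "status": "offline"},
        ]
    }
)
target = {"runner-a", "runner-b"}
status = json.loads(sample_payload)
# same rule as get_runner_status: offline AND in the watched set
offline = [r for r in status["runners"] if r["name"] in target and r["status"] == "offline"]
assert [r["name"] for r in offline] == ["runner-b"]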
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def get_qformer_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _lowerCAmelCase ( self ):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , PreTrainedTokenizerFast )
    def _lowerCAmelCase ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def _lowerCAmelCase ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tokens = tokenizer(input_str , return_token_type_ids=False )
        encoded_tokens_qformer = qformer_tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
    def _lowerCAmelCase ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def _lowerCAmelCase ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def _lowerCAmelCase ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 704 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( ProcessorMixin ):
'''simple docstring'''
A : Tuple = ['image_processor', 'tokenizer']
A : List[Any] = 'ViltImageProcessor'
A : Optional[Any] = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ) -> List[str]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 114 | 0 |
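Both files above follow the same composition pattern: a processor owns a tokenizer plus an image processor and merges their outputs into one batch dict. A stripped-down sketch of that pattern (an illustrative toy class, not the Hugging Face implementation):

class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify at least text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text))  # e.g. input_ids, attention_mask
        if images is not None:
            encoding.update(self.image_processor(images))  # e.g. pixel_values
        return encoding


# toy callables stand in for real tokenizer/image-processor objects
merged = ToyProcessor(lambda t: {"input_ids": [[1, 2]]}, lambda im: {"pixel_values": im})("hi", [0.0])
assert set(merged) == {"input_ids", "pixel_values"}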
'''simple docstring'''
def longest_common_substring( text1: str , text2: str ) -> str:
    if not (isinstance(text1 , str ) and isinstance(text2 , str )):
        raise ValueError('longest_common_substring() takes two strings for inputs' )
    text1_length = len(text1 )
    text2_length = len(text2 )
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1 , text1_length + 1 ):
        for j in range(1 , text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
"""simple docstring"""
def reverse_long_words( sentence: str ) -> str:
    return " ".join(
        """""".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 299 | 0 |
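Worked examples for the two helpers above (these use the corrected function names; the expected values follow directly from the algorithms — "abcd" is the longest substring shared by both inputs, and words longer than four characters get reversed):

assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"
assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"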
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester :
'''simple docstring'''
    def __init__(self ,parent ,batch_size=14 ,seq_length=7 ,is_training=True ,use_token_type_ids=True ,use_input_mask=True ,use_labels=True ,use_mc_token_ids=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_12 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config(self ):
"""simple docstring"""
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
    def create_and_check_ctrl_model(self ,config ,input_ids ,input_mask ,head_mask ,token_type_ids ,*args ):
        """simple docstring"""
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids ,token_type_ids=token_type_ids ,head_mask=head_mask )
        model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
    def create_and_check_lm_head_model(self ,config ,input_ids ,input_mask ,head_mask ,token_type_ids ,*args ):
        """simple docstring"""
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=input_ids )
        self.parent.assertEqual(result.loss.shape ,() )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
snake_case_ =(CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
snake_case_ =(CTRLLMHeadModel,) if is_torch_available() else ()
snake_case_ =(
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ =True
snake_case_ =False
snake_case_ =False
    def is_pipeline_test_to_skip(self ,pipeline_test_casse_name ,config_class ,model_architecture ,tokenizer_name ,processor_name ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=CTRLConfig ,n_embd=37 )
    def tearDown(self ):
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
pass
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
    def tearDown(self ):
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
        model = CTRLLMHeadModel.from_pretrained('''ctrl''' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=torch_device ) # Legal the president is
        expected_output_ids = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].tolist() ,expected_output_ids )
| 90 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case : str =logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( BaseImageProcessor):
'''simple docstring'''
snake_case_ =["""pixel_values"""]
    def __init__(self ,do_resize = True ,size = None ,resample = PILImageResampling.BICUBIC ,do_center_crop = True ,crop_size = None ,do_rescale = True ,rescale_factor = 1 / 2_55 ,do_normalize = True ,image_mean = None ,image_std = None ,do_convert_rgb = True ,**kwargs ,) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 2_24}
        size = get_size_dict(size ,default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        crop_size = get_size_dict(crop_size ,default_to_square=True ,param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self ,image ,size ,resample = PILImageResampling.BICUBIC ,data_format = None ,**kwargs ,) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size ,default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image ,size=size['''shortest_edge'''] ,default_to_square=False )
        return resize(image ,size=output_size ,resample=resample ,data_format=data_format ,**kwargs )
    def center_crop(self ,image ,size ,data_format = None ,**kwargs ,) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image ,size=(size['''height'''], size['''width''']) ,data_format=data_format ,**kwargs )
    def rescale(self ,image ,scale ,data_format = None ,**kwargs ,) -> np.ndarray:
        """simple docstring"""
        return rescale(image ,scale=scale ,data_format=data_format ,**kwargs )
    def normalize(self ,image ,mean ,std ,data_format = None ,**kwargs ,) -> np.ndarray:
        """simple docstring"""
        return normalize(image ,mean=mean ,std=std ,data_format=data_format ,**kwargs )
    def preprocess(self ,images ,do_resize = None ,size = None ,resample = None ,do_center_crop = None ,crop_size = None ,do_rescale = None ,rescale_factor = None ,do_normalize = None ,image_mean = None ,image_std = None ,do_convert_rgb = None ,return_tensors = None ,data_format = ChannelDimension.FIRST ,**kwargs ,) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size ,param_name='''size''' ,default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size ,param_name='''crop_size''' ,default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image ,size=size ,resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image ,size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image ,scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ,mean=image_mean ,std=image_std ) for image in images]
        images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
| 90 | 1 |
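A minimal numpy sketch of the transform order the processor above applies (illustrative only, not the Hugging Face implementation; resize is omitted for brevity, and the mean/std values are placeholders — the real class defaults to OPENAI_CLIP_MEAN/OPENAI_CLIP_STD):

import numpy as np

image = np.random.randint(0, 256, size=(256, 256, 3)).astype(np.float32)
crop = 224
top = (image.shape[0] - crop) // 2
left = (image.shape[1] - crop) // 2
cropped = image[top : top + crop, left : left + crop]  # center crop
rescaled = cropped * (1 / 255)  # rescale to [0, 1]
mean = np.array([0.5, 0.5, 0.5])
std = np.array([0.5, 0.5, 0.5])
normalized = (rescaled - mean) / std  # per-channel normalize
assert normalized.shape == (224, 224, 3)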
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    """simple docstring"""
    config = FunnelConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 193 |
from typing import Dict
from .base import GenericTensor, Pipeline
class __snake_case ( Pipeline ):
    def _sanitize_parameters( self ,truncation=None ,tokenize_kwargs=None ,return_tensors=None ,**kwargs ):
        """simple docstring"""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs['truncation'] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self ,inputs ,**tokenize_kwargs ) -> Dict[str, GenericTensor]:
        """simple docstring"""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs ,return_tensors=return_tensors ,**tokenize_kwargs )
        return model_inputs
    def _forward( self ,model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self ,model_outputs ,return_tensors=False ):
        """simple docstring"""
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self ,*args ,**kwargs ):
        """simple docstring"""
        return super().__call__(*args ,**kwargs )
| 193 | 1 |
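The class above plugs into the generic sanitize -> preprocess -> forward -> postprocess contract of the pipeline base class. A toy, model-free illustration of that contract (not the Hugging Face base class; all names below are made up):

class ToyPipeline:
    def __call__(self, inputs, **kwargs):
        pre_params, fwd_params, post_params = self._sanitize_parameters(**kwargs)
        model_inputs = self.preprocess(inputs, **pre_params)
        model_outputs = self._forward(model_inputs, **fwd_params)
        return self.postprocess(model_outputs, **post_params)

    def _sanitize_parameters(self, upper=None):
        # route caller kwargs to the stage that consumes them
        return {}, {}, ({"upper": upper} if upper is not None else {})

    def preprocess(self, inputs):
        return inputs.split()

    def _forward(self, model_inputs):
        return [w[::-1] for w in model_inputs]  # stand-in for a model call

    def postprocess(self, model_outputs, upper=False):
        out = " ".join(model_outputs)
        return out.upper() if upper else out


assert ToyPipeline()("ab cd", upper=True) == "BA DC"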
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> str:
UpperCAmelCase_ : List[Any] = defaultdict(__snake_case )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase_ : Optional[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase_ : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase_ : Dict = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(__snake_case ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(__snake_case, key=lambda SCREAMING_SNAKE_CASE__ : s["title"].lower() )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ) -> List[Any]:
with open(__snake_case, encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase_ : List[str] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase_ : List[Any] = content[api_idx]['''sections''']
# Then to the model doc
UpperCAmelCase_ : str = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase_ : Any = api_doc[model_idx]['''sections''']
UpperCAmelCase_ : Union[str, Any] = [(idx, section) for idx, section in enumerate(__snake_case ) if '''sections''' in section]
UpperCAmelCase_ : Union[str, Any] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase_ : Union[str, Any] = modality_doc['''sections''']
UpperCAmelCase_ : Dict = clean_model_doc_toc(__snake_case )
if old_modality_doc != new_modality_doc:
UpperCAmelCase_ : List[str] = True
if overwrite:
UpperCAmelCase_ : Union[str, Any] = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase_ : Any = model_doc
UpperCAmelCase_ : List[Any] = api_doc
with open(__snake_case, '''w''', encoding='''utf-8''' ) as f:
f.write(yaml.dump(__snake_case, allow_unicode=__snake_case ) )
else:
raise ValueError(
                '''The model doc part of the table of contents is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
snake_case_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
snake_case_ : List[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 705 |
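# The cleaner above deduplicates table-of-contents entries by their "local" key and
# sorts the result by lowercased title. A self-contained sketch of the same behaviour
# on a toy list (all names here are mine):
entries = [
    {"local": "bert", "title": "BERT"},
    {"local": "albert", "title": "ALBERT"},
    {"local": "bert", "title": "BERT"},  # duplicate key with an identical title gets collapsed
]
deduped = {entry["local"]: entry["title"] for entry in entries}
cleaned = sorted(
    ({"local": key, "title": title} for key, title in deduped.items()),
    key=lambda entry: entry["title"].lower(),
)
print(cleaned)
# [{'local': 'albert', 'title': 'ALBERT'}, {'local': 'bert', 'title': 'BERT'}]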
'''simple docstring'''
class __a :
def __init__( self : List[Any] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = size
UpperCAmelCase_ : Tuple = [0] * size
UpperCAmelCase_ : Optional[Any] = [0] * size
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = value
while index < self.size:
UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1
if current_left_border == index:
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
        right -= 1 # Because right is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ )
if left <= current_left:
UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644 | 0 |
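# The class above is a Fenwick-tree variant answering range-maximum queries with point
# updates, where query(left, right) treats right as exclusive. Since the identifiers in
# the snippet were anonymized, here is a compact self-contained sketch of the same
# technique; all names are mine, values are assumed non-negative, and the update shown
# is only valid while stored values never shrink:
class MaxFenwickSketch:
    """Range-maximum queries with point updates, right bound exclusive."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # partial maxima over Fenwick blocks

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            left = self.get_prev(index) + 1  # the block tree[index] covers starts here
            self.tree[index] = value if left == index else max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # right bound is exclusive
        result = 0
        while left <= right:
            prev = self.get_prev(right)
            if left <= prev:
                result = max(result, self.tree[right])
                right = prev
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


fenwick = MaxFenwickSketch(8)
for i, v in enumerate([2, 5, 1, 9, 3, 7, 4, 6]):
    fenwick.update(i, v)
assert fenwick.query(0, 8) == 9  # max over the whole array
assert fenwick.query(4, 7) == 7  # max of [3, 7, 4]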
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] ={
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
"""simple docstring"""
A__ : List[str] = '''trajectory_transformer'''
A__ : str = ['''past_key_values''']
A__ : Dict = {
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , A=1_00 , A=5 , A=1 , A=1 , A=2_49 , A=6 , A=17 , A=25 , A=4 , A=4 , A=1_28 , A=0.1 , A=0.1 , A=0.1 , A=0.0006 , A=5_12 , A=0.02 , A=1e-12 , A=1 , A=True , A=1 , A=5_02_56 , A=5_02_56 , **A , ) -> Tuple:
A: Optional[int] = vocab_size
A: str = action_weight
A: Optional[Any] = reward_weight
A: str = value_weight
A: int = max_position_embeddings
A: Any = block_size
A: str = action_dim
A: int = observation_dim
A: Optional[int] = transition_dim
A: List[Any] = learning_rate
A: List[Any] = n_layer
A: List[Any] = n_head
A: Dict = n_embd
A: Any = embd_pdrop
A: Union[str, Any] = attn_pdrop
A: Any = resid_pdrop
A: Any = initializer_range
A: Union[str, Any] = layer_norm_eps
A: List[Any] = kaiming_initializer_range
A: Union[str, Any] = use_cache
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
| 135 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 135 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a__ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 706 |
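# The _LazyModule plumbing above defers the heavy torch/TensorFlow imports until a
# name is actually accessed. A toy sketch of the same idea using a module-level
# __getattr__ (PEP 562); the mapping below is illustrative only:
import importlib

_import_structure_sketch = {"math": ["sqrt"]}

def __getattr__(name):
    for module_name, names in _import_structure_sketch.items():
        if name in names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)

print(__getattr__("sqrt")(9.0))  # 3.0, called directly here only for the demo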
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def _UpperCAmelCase ( ):
snake_case__ = argparse.ArgumentParser()
parser.add_argument("""-f""" )
snake_case__ = parser.parse_args()
return args.f
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
def __magic_name__ ( self : int):
'''simple docstring'''
snake_case__ = logging.StreamHandler(sys.stdout)
logger.addHandler(UpperCamelCase__)
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : List[str]):
'''simple docstring'''
snake_case__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""")
with patch.object(UpperCamelCase__ , """argv""" , UpperCamelCase__):
snake_case__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(UpperCamelCase__ , 0.6_66)
@slow
@require_torch_non_multi_gpu
def __magic_name__ ( self : str):
'''simple docstring'''
snake_case__ = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(UpperCamelCase__)
snake_case__ = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(UpperCamelCase__)
snake_case__ = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(UpperCamelCase__)
| 99 | 0 |
from __future__ import annotations
from PIL import Image
# Define glider example
_lowerCAmelCase : int = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_lowerCAmelCase : str = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def a_ ( UpperCamelCase_ : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowerCamelCase = []
for i in range(len(UpperCamelCase_ ) ):
lowerCamelCase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowerCamelCase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(UpperCamelCase_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(UpperCamelCase_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(UpperCamelCase_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowerCamelCase = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(UpperCamelCase_ )
return next_generation
def a_ ( UpperCamelCase_ : list[list[int]] , UpperCamelCase_ : int ) -> list[Image.Image]:
"""simple docstring"""
lowerCamelCase = []
for _ in range(UpperCamelCase_ ):
# Create output image
lowerCamelCase = Image.new('RGB' , (len(cells[0] ), len(UpperCamelCase_ )) )
lowerCamelCase = img.load()
# Save cells to image
for x in range(len(UpperCamelCase_ ) ):
for y in range(len(cells[0] ) ):
lowerCamelCase = 2_5_5 - cells[y][x] * 2_5_5
lowerCamelCase = (colour, colour, colour)
# Save image
images.append(UpperCamelCase_ )
lowerCamelCase = new_generation(UpperCamelCase_ )
return images
if __name__ == "__main__":
_lowerCAmelCase : Any = generate_images(GLIDER, 1_6)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 246 |
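# As a quick sanity check of the rules implemented above, the blinker is a period-2
# oscillator. An independent one-step sketch (all helper names are mine):
BLINKER_SKETCH = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]

def step(cells):
    n = len(cells)
    out = []
    for i in range(n):
        row = []
        for j in range(len(cells[i])):
            # count live neighbours among the up-to-8 surrounding cells
            live = sum(
                cells[a][b]
                for a in range(max(0, i - 1), min(n, i + 2))
                for b in range(max(0, j - 1), min(len(cells[i]), j + 2))
                if (a, b) != (i, j)
            )
            alive = cells[i][j] == 1
            row.append(1 if (alive and 2 <= live <= 3) or (not alive and live == 3) else 0)
        out.append(row)
    return out

once = step(BLINKER_SKETCH)
assert once == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert step(once) == BLINKER_SKETCH  # period-2 oscillator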
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
snake_case = 42
snake_case = jnp.floataa
snake_case = True
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
super().setup()
lowerCamelCase = nn.Dense(5 , dtype=self.dtype )
def __call__( self : int , *__snake_case : str , **__snake_case : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowerCamelCase = super().__call__(*__snake_case , **__snake_case )
lowerCamelCase = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
snake_case = FlaxBigBirdForNaturalQuestionsModule
def a_ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : List[str] ) -> Any:
"""simple docstring"""
def cross_entropy(UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any=None ):
lowerCamelCase = logits.shape[-1]
lowerCamelCase = (labels[..., None] == jnp.arange(UpperCamelCase_ )[None]).astype('f4' )
lowerCamelCase = jax.nn.log_softmax(UpperCamelCase_ , axis=-1 )
lowerCamelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowerCamelCase = reduction(UpperCamelCase_ )
return loss
lowerCamelCase = partial(UpperCamelCase_ , reduction=jnp.mean )
lowerCamelCase = cross_entropy(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase = cross_entropy(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase = cross_entropy(UpperCamelCase_ , UpperCamelCase_ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class lowerCAmelCase :
'''simple docstring'''
snake_case = "google/bigbird-roberta-base"
snake_case = 3_000
snake_case = 10_500
snake_case = 128
snake_case = 3
snake_case = 1
snake_case = 5
# tx_args
snake_case = 3E-5
snake_case = 0.0
snake_case = 20_000
snake_case = 0.0_0_9_5
snake_case = "bigbird-roberta-natural-questions"
snake_case = "training-expt"
snake_case = "data/nq-training.jsonl"
snake_case = "data/nq-validation.jsonl"
def lowerCamelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=__snake_case )
lowerCamelCase = os.path.join(self.base_dir , self.save_dir )
lowerCamelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class lowerCAmelCase :
'''simple docstring'''
snake_case = 42
snake_case = 4_096 # no dynamic padding on TPUs
def __call__( self : Optional[int] , __snake_case : Any ) -> int:
'''simple docstring'''
lowerCamelCase = self.collate_fn(__snake_case )
lowerCamelCase = jax.tree_util.tree_map(__snake_case , __snake_case )
return batch
def lowerCamelCase__ ( self : Dict , __snake_case : Any ) -> Any:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.fetch_inputs(features['input_ids'] )
lowerCamelCase = {
'input_ids': jnp.array(__snake_case , dtype=jnp.intaa ),
'attention_mask': jnp.array(__snake_case , dtype=jnp.intaa ),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa ),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa ),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa ),
}
return batch
def lowerCamelCase__ ( self : Optional[int] , __snake_case : list ) -> str:
'''simple docstring'''
lowerCamelCase = [self._fetch_inputs(__snake_case ) for ids in input_ids]
return zip(*__snake_case )
def lowerCamelCase__ ( self : Tuple , __snake_case : list ) -> int:
'''simple docstring'''
lowerCamelCase = [1 for _ in range(len(__snake_case ) )]
while len(__snake_case ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def a_ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any]=None ) -> Any:
"""simple docstring"""
if seed is not None:
lowerCamelCase = dataset.shuffle(seed=UpperCamelCase_ )
for i in range(len(UpperCamelCase_ ) // batch_size ):
lowerCamelCase = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCamelCase_ )
@partial(jax.pmap , axis_name='batch' )
def a_ ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , **UpperCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
def loss_fn(UpperCamelCase_ : Optional[int] ):
lowerCamelCase = model_inputs.pop('start_labels' )
lowerCamelCase = model_inputs.pop('end_labels' )
lowerCamelCase = model_inputs.pop('pooled_labels' )
lowerCamelCase = state.apply_fn(**UpperCamelCase_ , params=UpperCamelCase_ , dropout_rng=UpperCamelCase_ , train=UpperCamelCase_ )
lowerCamelCase , lowerCamelCase , lowerCamelCase = outputs
return state.loss_fn(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , )
lowerCamelCase , lowerCamelCase = jax.random.split(UpperCamelCase_ )
lowerCamelCase = jax.value_and_grad(UpperCamelCase_ )
lowerCamelCase , lowerCamelCase = grad_fn(state.params )
lowerCamelCase = jax.lax.pmean({'loss': loss} , axis_name='batch' )
lowerCamelCase = jax.lax.pmean(UpperCamelCase_ , 'batch' )
lowerCamelCase = state.apply_gradients(grads=UpperCamelCase_ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def a_ ( UpperCamelCase_ : List[str] , **UpperCamelCase_ : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase = model_inputs.pop('start_labels' )
lowerCamelCase = model_inputs.pop('end_labels' )
lowerCamelCase = model_inputs.pop('pooled_labels' )
lowerCamelCase = state.apply_fn(**UpperCamelCase_ , params=state.params , train=UpperCamelCase_ )
lowerCamelCase , lowerCamelCase , lowerCamelCase = outputs
lowerCamelCase = state.loss_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class lowerCAmelCase ( train_state.TrainState ):
'''simple docstring'''
snake_case = struct.field(pytree_node=__UpperCamelCase )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = None
def lowerCamelCase__ ( self : Any , __snake_case : str , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[int]=None ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = model.params
lowerCamelCase = TrainState.create(
apply_fn=model.__call__ , params=__snake_case , tx=__snake_case , loss_fn=__snake_case , )
if ckpt_dir is not None:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = restore_checkpoint(__snake_case , __snake_case )
lowerCamelCase = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
lowerCamelCase , lowerCamelCase = build_tx(**__snake_case )
lowerCamelCase = train_state.TrainState(
step=__snake_case , apply_fn=model.__call__ , params=__snake_case , tx=__snake_case , opt_state=__snake_case , )
lowerCamelCase = args
lowerCamelCase = data_collator
lowerCamelCase = lr
lowerCamelCase = params
lowerCamelCase = jax_utils.replicate(__snake_case )
return state
def lowerCamelCase__ ( self : int , __snake_case : Any , __snake_case : List[Any] , __snake_case : Any ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = self.args
lowerCamelCase = len(__snake_case ) // args.batch_size
lowerCamelCase = jax.random.PRNGKey(0 )
lowerCamelCase = jax.random.split(__snake_case , jax.device_count() )
for epoch in range(args.max_epochs ):
lowerCamelCase = jnp.array(0 , dtype=jnp.floataa )
lowerCamelCase = get_batched_dataset(__snake_case , args.batch_size , seed=__snake_case )
lowerCamelCase = 0
for batch in tqdm(__snake_case , total=__snake_case , desc=F'''Running EPOCH-{epoch}''' ):
lowerCamelCase = self.data_collator(__snake_case )
lowerCamelCase , lowerCamelCase , lowerCamelCase = self.train_step_fn(__snake_case , __snake_case , **__snake_case )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
if i % args.logging_steps == 0:
lowerCamelCase = jax_utils.unreplicate(state.step )
lowerCamelCase = running_loss.item() / i
lowerCamelCase = self.scheduler_fn(state_step - 1 )
lowerCamelCase = self.evaluate(__snake_case , __snake_case )
lowerCamelCase = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(__snake_case ) )
self.logger.log(__snake_case , commit=__snake_case )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=__snake_case )
def lowerCamelCase__ ( self : Tuple , __snake_case : Dict , __snake_case : Tuple ) -> Dict:
'''simple docstring'''
lowerCamelCase = get_batched_dataset(__snake_case , self.args.batch_size )
lowerCamelCase = len(__snake_case ) // self.args.batch_size
lowerCamelCase = jnp.array(0 , dtype=jnp.floataa )
lowerCamelCase = 0
for batch in tqdm(__snake_case , total=__snake_case , desc='Evaluating ... ' ):
lowerCamelCase = self.data_collator(__snake_case )
lowerCamelCase = self.val_step_fn(__snake_case , **__snake_case )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
return running_loss / i
def lowerCamelCase__ ( self : Optional[int] , __snake_case : List[str] , __snake_case : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = jax_utils.unreplicate(__snake_case )
print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=' ... ' )
self.model_save_fn(__snake_case , params=state.params )
with open(os.path.join(__snake_case , 'opt_state.msgpack' ) , 'wb' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__snake_case , 'args.joblib' ) )
joblib.dump(self.data_collator , os.path.join(__snake_case , 'data_collator.joblib' ) )
with open(os.path.join(__snake_case , 'training_state.json' ) , 'w' ) as f:
json.dump({'step': state.step.item()} , __snake_case )
print('DONE' )
def a_ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int ) -> str:
"""simple docstring"""
print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=' ... ' )
with open(os.path.join(UpperCamelCase_ , 'flax_model.msgpack' ) , 'rb' ) as f:
lowerCamelCase = from_bytes(state.params , f.read() )
with open(os.path.join(UpperCamelCase_ , 'opt_state.msgpack' ) , 'rb' ) as f:
lowerCamelCase = from_bytes(state.opt_state , f.read() )
lowerCamelCase = joblib.load(os.path.join(UpperCamelCase_ , 'args.joblib' ) )
lowerCamelCase = joblib.load(os.path.join(UpperCamelCase_ , 'data_collator.joblib' ) )
with open(os.path.join(UpperCamelCase_ , 'training_state.json' ) , 'r' ) as f:
lowerCamelCase = json.load(UpperCamelCase_ )
lowerCamelCase = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def a_ ( UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase = num_train_steps - warmup_steps
lowerCamelCase = optax.linear_schedule(init_value=UpperCamelCase_ , end_value=UpperCamelCase_ , transition_steps=UpperCamelCase_ )
lowerCamelCase = optax.linear_schedule(init_value=UpperCamelCase_ , end_value=1E-7 , transition_steps=UpperCamelCase_ )
lowerCamelCase = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def a_ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
def weight_decay_mask(UpperCamelCase_ : List[str] ):
lowerCamelCase = traverse_util.flatten_dict(UpperCamelCase_ )
lowerCamelCase = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(UpperCamelCase_ )
lowerCamelCase = scheduler_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase = optax.adamw(learning_rate=UpperCamelCase_ , weight_decay=UpperCamelCase_ , mask=UpperCamelCase_ )
return tx, lr
| 246 | 1 |
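# The learning-rate schedule assembled in the script above is a linear warmup joined
# to a linear decay via optax.join_schedules. A minimal sketch evaluating it at a few
# steps; the hyperparameter values here are illustrative assumptions:
import optax

init_lr, peak_lr, warmup_steps, num_train_steps = 0.0, 3e-5, 100, 1000
warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=peak_lr, transition_steps=warmup_steps)
decay_fn = optax.linear_schedule(init_value=peak_lr, end_value=1e-7, transition_steps=num_train_steps - warmup_steps)
schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])

for step in (0, 50, 100, 550, 1000):
    print(step, float(schedule(step)))  # ramps up to peak_lr at step 100, then decays towards 1e-7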
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
A =(3, 9, -11, 0, 7, 5, 1, -1)
A =(4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class _a :
__a : int
__a : Node | None
class _a :
def __init__( self : Union[str, Any] , lowercase : Iterable[int] ):
'''simple docstring'''
UpperCAmelCase = None
for i in sorted(lowercase , reverse=lowercase ):
UpperCAmelCase = Node(lowercase , self.head )
def __iter__( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.head
while node:
yield node.data
UpperCAmelCase = node.next_node
def __len__( self : int ):
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self : Any ):
'''simple docstring'''
return " -> ".join([str(lowercase ) for node in self] )
def snake_case_ (_a : SortedLinkedList , _a : SortedLinkedList ):
return SortedLinkedList(list(_a ) + list(_a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
A =SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 711 |
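# Because the linked list above sorts its elements on construction, merging two lists
# is equivalent to concatenating both element sequences and rebuilding. The expected
# ordering for the module's own test data, checked with plain Python:
odd = [3, 9, -11, 0, 7, 5, 1, -1]
even = [4, 6, 2, 0, 8, 10, 3, -2]
print(sorted(odd + even))
# [-11, -2, -1, 0, 0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10]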
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( __a , unittest.TestCase ):
__a : Any = MgpstrTokenizer
__a : Optional[Any] = False
__a : str = {}
__a : Optional[int] = False
def A ( self : Dict ):
'''simple docstring'''
super().setUp()
# fmt: off
UpperCAmelCase = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
UpperCAmelCase = dict(zip(lowercase , range(len(lowercase ) ) ) )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase ) + '''\n''' )
def A ( self : int , **lowercase : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : int , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = '''tester'''
UpperCAmelCase = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=lowercase )
self.assertEqual(len(lowercase ) , 1 )
UpperCAmelCase = tokenizer.decode(lowercase , skip_special_tokens=lowercase )
self.assertTrue(special_token not in decoded )
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase , UpperCAmelCase = self.get_input_output_texts(lowercase )
UpperCAmelCase = tokenizer.tokenize(lowercase )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase )
UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowercase )
self.assertNotEqual(len(lowercase ) , 0 )
UpperCAmelCase = tokenizer.decode(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , lowercase )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def A ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def A ( self : str ):
'''simple docstring'''
pass
| 358 | 0 |
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=lowerCAmelCase ):
__lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class snake_case_ ( metaclass=lowerCAmelCase ):
__lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class snake_case_ ( metaclass=lowerCAmelCase ):
__lowerCamelCase : str = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class snake_case_ ( metaclass=lowerCAmelCase ):
__lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class snake_case_ ( metaclass=lowerCAmelCase ):
__lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class snake_case_ ( metaclass=lowerCAmelCase ):
__lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 345 |
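# The dummy classes above exist so that a missing optional dependency fails loudly at
# use time instead of at import time. A stripped-down sketch of the check they delegate
# to (the function name is mine; the real transformers helper additionally maps backend
# names to install hints):
import importlib.util

def requires_backends_sketch(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backend(s): {missing}")

requires_backends_sketch("SomeOnnxClass", ["importlib"])  # backend present: no error raised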
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_ ( lowerCAmelCase , unittest.TestCase ):
__lowerCamelCase : Any = TransfoXLTokenizer
__lowerCamelCase : int = False
__lowerCamelCase : str = False
def __A ( self ):
super().setUp()
SCREAMING_SNAKE_CASE_ : int = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __A ( self , **__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Dict = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = '<unk> UNwanted , running'
SCREAMING_SNAKE_CASE_ : List[str] = '<unk> unwanted, running'
return input_text, output_text
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(__lowerCAmelCase , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [0, 4, 8, 7] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : int = TransfoXLTokenizer(lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TransfoXLTokenizer(lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = TransfoXLTokenizer(lower_case=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
SCREAMING_SNAKE_CASE_ : Dict = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(__lowerCAmelCase ) , __lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(__lowerCAmelCase )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__lowerCAmelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 345 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __snake_case ( lowerCAmelCase__ ):
def __init__( self , _A , _A , _A , _A = None , ):
super().__init__()
self.register_modules(transformer=_A , vae=_A , scheduler=_A)
        # create an imagenet -> id dictionary for easier use
SCREAMING_SNAKE_CASE_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(','):
SCREAMING_SNAKE_CASE_ = int(_A)
SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items()))
def lowerCAmelCase__ ( self , _A):
if not isinstance(_A , _A):
SCREAMING_SNAKE_CASE_ = list(_A)
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , _A , _A = 4.0 , _A = None , _A = 50 , _A = "pil" , _A = True , ):
SCREAMING_SNAKE_CASE_ = len(_A)
SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size
SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels
SCREAMING_SNAKE_CASE_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents
SCREAMING_SNAKE_CASE_ = torch.tensor(_A , device=self.device).reshape(-1)
SCREAMING_SNAKE_CASE_ = torch.tensor([1000] * batch_size , device=self.device)
SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_A)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ = latent_model_input[: len(_A) // 2]
SCREAMING_SNAKE_CASE_ = torch.cat([half, half] , dim=0)
SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A)
SCREAMING_SNAKE_CASE_ = t
if not torch.is_tensor(_A):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == 'mps'
if isinstance(_A , _A):
SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa
else:
SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa
SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
SCREAMING_SNAKE_CASE_ = self.transformer(
_A , timestep=_A , class_labels=_A).sample
# perform guidance
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , len(_A) // 2 , dim=0)
SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0)
SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , _A , dim=1)
else:
SCREAMING_SNAKE_CASE_ = noise_pred
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A).prev_sample
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0)
else:
SCREAMING_SNAKE_CASE_ = latent_model_input
SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents
SCREAMING_SNAKE_CASE_ = self.vae.decode(_A).sample
SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_A)
| 620 |
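# Classifier-free guidance as applied in the denoising loop above reduces to
# eps = eps_uncond + scale * (eps_cond - eps_uncond). A tiny numeric check of that
# arithmetic (the values are arbitrary):
import torch

scale = 4.0
eps_cond = torch.tensor([0.5, -0.2])
eps_uncond = torch.tensor([0.1, 0.1])
print(eps_uncond + scale * (eps_cond - eps_uncond))
# approximately tensor([ 1.7000, -1.1000])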
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __snake_case :
def __init__( self , _A , _A , _A , _A , _A , _A=0.2 , _A=0.2):
SCREAMING_SNAKE_CASE_ = bp_numa
SCREAMING_SNAKE_CASE_ = bp_numa
SCREAMING_SNAKE_CASE_ = bp_numa
SCREAMING_SNAKE_CASE_ = conva_get[:2]
SCREAMING_SNAKE_CASE_ = conva_get[2]
SCREAMING_SNAKE_CASE_ = size_pa
SCREAMING_SNAKE_CASE_ = rate_w
SCREAMING_SNAKE_CASE_ = rate_t
SCREAMING_SNAKE_CASE_ = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.conva[1]) + 1
SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1
SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1
def lowerCAmelCase__ ( self , _A):
# save model dict with pickle
SCREAMING_SNAKE_CASE_ = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(_A , 'wb') as f:
pickle.dump(_A , _A)
print(f"""Model saved: {save_path}""")
@classmethod
def lowerCAmelCase__ ( cls , _A):
# read saved model
with open(_A , 'rb') as f:
SCREAMING_SNAKE_CASE_ = pickle.load(_A) # noqa: S301
SCREAMING_SNAKE_CASE_ = model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
SCREAMING_SNAKE_CASE_ = model_dic.get('size_pooling1')
SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp1')
SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp2')
SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp3')
SCREAMING_SNAKE_CASE_ = model_dic.get('rate_weight')
SCREAMING_SNAKE_CASE_ = model_dic.get('rate_thre')
# create model instance
SCREAMING_SNAKE_CASE_ = CNN(_A , _A , _A , _A , _A , _A , _A)
        # modify model parameters
SCREAMING_SNAKE_CASE_ = model_dic.get('w_conv1')
SCREAMING_SNAKE_CASE_ = model_dic.get('wkj')
SCREAMING_SNAKE_CASE_ = model_dic.get('vji')
SCREAMING_SNAKE_CASE_ = model_dic.get('thre_conv1')
SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp2')
SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp3')
return conv_ins
def lowerCAmelCase__ ( self , _A):
return 1 / (1 + np.exp(-1 * x))
def lowerCAmelCase__ ( self , _A):
return round(_A , 3)
def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A):
# convolution process
SCREAMING_SNAKE_CASE_ = convs[0]
SCREAMING_SNAKE_CASE_ = convs[1]
SCREAMING_SNAKE_CASE_ = np.shape(_A)[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE_ = []
for i_focus in range(0 , size_data - size_conv + 1 , _A):
for j_focus in range(0 , size_data - size_conv + 1 , _A):
SCREAMING_SNAKE_CASE_ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_A)
        # calculate the feature map of every single kernel, and save it as a list of matrices
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = int((size_data - size_conv) / conv_step + 1)
for i_map in range(_A):
SCREAMING_SNAKE_CASE_ = []
for i_focus in range(len(_A)):
SCREAMING_SNAKE_CASE_ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(_A))
SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(
_A , _A)
data_featuremap.append(_A)
        # expanding the data slice to one dimension
SCREAMING_SNAKE_CASE_ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_A))
SCREAMING_SNAKE_CASE_ = np.asarray(_A)
return focus_list, data_featuremap
def lowerCAmelCase__ ( self , _A , _A , _A="average_pool"):
# pooling process
SCREAMING_SNAKE_CASE_ = len(featuremaps[0])
SCREAMING_SNAKE_CASE_ = int(size_map / size_pooling)
SCREAMING_SNAKE_CASE_ = []
for i_map in range(len(_A)):
SCREAMING_SNAKE_CASE_ = featuremaps[i_map]
SCREAMING_SNAKE_CASE_ = []
for i_focus in range(0 , _A , _A):
for j_focus in range(0 , _A , _A):
SCREAMING_SNAKE_CASE_ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_A))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_A))
SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(_A , _A)
featuremap_pooled.append(_A)
return featuremap_pooled
def lowerCAmelCase__ ( self , _A):
        # expanding three-dimensional data to a one-dimensional list
SCREAMING_SNAKE_CASE_ = []
for i in range(len(_A)):
SCREAMING_SNAKE_CASE_ = np.shape(data[i])
SCREAMING_SNAKE_CASE_ = data[i].reshape(1 , shapes[0] * shapes[1])
SCREAMING_SNAKE_CASE_ = data_listed.getA().tolist()[0]
data_expanded.extend(_A)
SCREAMING_SNAKE_CASE_ = np.asarray(_A)
return data_expanded
def lowerCAmelCase__ ( self , _A):
        # expanding a matrix to a one-dimensional list
SCREAMING_SNAKE_CASE_ = np.asarray(_A)
SCREAMING_SNAKE_CASE_ = np.shape(_A)
SCREAMING_SNAKE_CASE_ = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
for i_map in range(_A):
SCREAMING_SNAKE_CASE_ = np.ones((size_map, size_map))
for i in range(0 , _A , _A):
for j in range(0 , _A , _A):
SCREAMING_SNAKE_CASE_ = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE_ = i_pool + 1
SCREAMING_SNAKE_CASE_ = np.multiply(
_A , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(_A)
return pd_all
def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A=bool):
        # model training
print('----------------------Start Training-------------------------')
print((' - - Shape: Train_Data ', np.shape(_A)))
print((' - - Shape: Teach_Data ', np.shape(_A)))
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 10000
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE_ = 0
print(f"""-------------Learning Time {rp}--------------""")
for p in range(len(_A)):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_train[p])
SCREAMING_SNAKE_CASE_ = np.asarray(datas_teach[p])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
_A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
SCREAMING_SNAKE_CASE_ = np.shape(_A)
SCREAMING_SNAKE_CASE_ = self._expand(_A)
SCREAMING_SNAKE_CASE_ = data_bp_input
SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji.T) - self.thre_bpa
SCREAMING_SNAKE_CASE_ = self.sig(_A)
SCREAMING_SNAKE_CASE_ = np.dot(_A , self.wkj.T) - self.thre_bpa
SCREAMING_SNAKE_CASE_ = self.sig(_A)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE_ = np.multiply(
(data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa)))
SCREAMING_SNAKE_CASE_ = np.multiply(
np.dot(_A , self.wkj) , np.multiply(_A , (1 - bp_outa)))
SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji)
SCREAMING_SNAKE_CASE_ = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE_ = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE_ = self._calculate_gradient_from_pool(
_A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
SCREAMING_SNAKE_CASE_ = self._expand_mat(pd_conva_all[k_conv])
SCREAMING_SNAKE_CASE_ = self.rate_weight * np.dot(_A , _A)
SCREAMING_SNAKE_CASE_ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
SCREAMING_SNAKE_CASE_ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the absolute error of each single image
SCREAMING_SNAKE_CASE_ = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE_ = rp + 1
SCREAMING_SNAKE_CASE_ = error_count / patterns
all_mse.append(_A)
def draw_error():
SCREAMING_SNAKE_CASE_ = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(_A , '+-')
plt.plot(_A , 'r--')
plt.xlabel('Learning Times')
plt.ylabel('All_mse')
plt.grid(_A , alpha=0.5)
plt.show()
        print('------------------Training Completed---------------------')
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}"""))
if draw_e:
draw_error()
return mse
def lowerCAmelCase__ ( self , _A):
        # model prediction
SCREAMING_SNAKE_CASE_ = []
print('-------------------Start Testing-------------------------')
print((' - - Shape: Test_Data ', np.shape(_A)))
for p in range(len(_A)):
SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_test[p])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
_A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
SCREAMING_SNAKE_CASE_ = self._expand(_A)
SCREAMING_SNAKE_CASE_ = data_bp_input
SCREAMING_SNAKE_CASE_ = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE_ = self.sig(_A)
SCREAMING_SNAKE_CASE_ = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE_ = self.sig(_A)
produce_out.extend(bp_outa.getA().tolist())
SCREAMING_SNAKE_CASE_ = [list(map(self.do_round , _A)) for each in produce_out]
return np.asarray(_A)
def lowerCAmelCase__ ( self , _A):
        # return the image data after the convolution process so we can inspect it
SCREAMING_SNAKE_CASE_ = np.asmatrix(_A)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
_A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 620 | 1 |
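# The average pooling implemented loop-wise in self.pooling above can be expressed
# directly with numpy reshaping; a compact equivalent sketch for a 4x4 map and a
# size-2 window (names are mine):
import numpy as np

feature_map = np.arange(16, dtype=float).reshape(4, 4)
k = 2
pooled = feature_map.reshape(4 // k, k, 4 // k, k).swapaxes(1, 2).mean(axis=(2, 3))
print(pooled)
# [[ 2.5  4.5]
#  [10.5 12.5]]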
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class snake_case ( UpperCamelCase_ ):
def __init__( self : Optional[Any] , a_ : List[str] , a_ : List[str] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = params
SCREAMING_SNAKE_CASE__ : List[str] = np.array(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([len(a_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : List[str] , a_ : Optional[Any] )-> Optional[int]:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self : str )-> List[str]:
"""simple docstring"""
return len(self.lengths )
def __lowercase( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowercase( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.params.max_model_input_size
SCREAMING_SNAKE_CASE__ : Tuple = self.lengths > max_len
logger.info(F'''Splitting {sum(a_ )} too long sequences.''' )
def divide_chunks(a_ : List[str] , a_ : Optional[Any] ):
return [l[i : i + n] for i in range(0 , len(a_ ) , a_ )]
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
if self.params.mlm:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.insert(a_ , 0 , a_ )
if sub_s[-1] != sep_id:
SCREAMING_SNAKE_CASE__ : List[Any] = np.insert(a_ , len(a_ ) , a_ )
assert len(a_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(a_ )
new_tok_ids.extend(a_ )
new_lengths.extend([len(a_ ) for l in sub_seqs] )
SCREAMING_SNAKE_CASE__ : List[Any] = np.array(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array(a_ )
def __lowercase( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = len(self )
SCREAMING_SNAKE_CASE__ : Dict = self.lengths > 11
SCREAMING_SNAKE_CASE__ : List[str] = self.token_ids[indices]
SCREAMING_SNAKE_CASE__ : Dict = self.lengths[indices]
SCREAMING_SNAKE_CASE__ : Any = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def __lowercase( self : Union[str, Any] )-> int:
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
SCREAMING_SNAKE_CASE__ : Any = self.params.special_tok_ids['unk_token']
SCREAMING_SNAKE_CASE__ : Tuple = len(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
SCREAMING_SNAKE_CASE__ : Tuple = (unk_occs / self.lengths) < 0.5
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.token_ids[indices]
SCREAMING_SNAKE_CASE__ : Any = self.lengths[indices]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def __lowercase( self : Optional[Any] )-> List[str]:
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowercase( self : List[Any] , a_ : List[str] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = [t[0] for t in batch]
SCREAMING_SNAKE_CASE__ : Any = [t[1] for t in batch]
assert len(a_ ) == len(a_ )
# Max for paddings
SCREAMING_SNAKE_CASE__ : Tuple = max(a_ )
# Pad token ids
if self.params.mlm:
SCREAMING_SNAKE_CASE__ : List[Any] = self.params.special_tok_ids['pad_token']
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.params.special_tok_ids['unk_token']
SCREAMING_SNAKE_CASE__ : Optional[Any] = [list(t.astype(a_ ) ) + [pad_idx] * (max_seq_len_ - len(a_ )) for t in token_ids]
assert len(tk_ ) == len(a_ )
assert all(len(a_ ) == max_seq_len_ for t in tk_ )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(a_ ) # (bs)
return tk_t, lg_t
| 85 |
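# The long-sequence handling above chops token ids into windows of
# max_model_input_size - 2 and re-attaches the boundary tokens to every chunk. A
# simplified sketch of that idea; it strips existing boundaries first for clarity,
# whereas the class above re-adds them only when missing (names and ids are
# illustrative):
CLS, SEP = 101, 102

def split_sequence(ids, max_len):
    body = ids[1:-1]  # drop the existing boundary tokens
    window = max_len - 2
    chunks = [body[i : i + window] for i in range(0, len(body), window)]
    return [[CLS, *chunk, SEP] for chunk in chunks]

print(split_sequence([CLS, 1, 2, 3, 4, 5, SEP], max_len=5))
# [[101, 1, 2, 3, 102], [101, 4, 5, 102]]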
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
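# A hypothetical invocation of this script (the file name and model identifiers
# below are examples, not taken from the source):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint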
| 335 | 0 |
'''simple docstring'''
from __future__ import annotations
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Any = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(__A ) != 0:
lowerCAmelCase__ : Union[str, Any] = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__A ) != cols:
raise error
for value in row:
if not isinstance(__A ,(int, float) ):
raise error
lowerCAmelCase__ : List[str] = rows
else:
lowerCAmelCase__ : Any = []
def UpperCAmelCase_ ( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def UpperCAmelCase_ ( self ) -> int:
return len(self.rows )
@property
def UpperCAmelCase_ ( self ) -> int:
return len(self.rows[0] )
@property
def UpperCAmelCase_ ( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def UpperCAmelCase_ ( self ) -> bool:
return self.order[0] == self.order[1]
def UpperCAmelCase_ ( self ) -> Matrix:
lowerCAmelCase__ : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__A )
def UpperCAmelCase_ ( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def UpperCAmelCase_ ( self ) -> bool:
return bool(self.determinant() )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> int:
lowerCAmelCase__ : str = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__A ).determinant()
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> int:
if (row + column) % 2 == 0:
            return self.get_minor(row , column)
        return -1 * self.get_minor(row , column)
def UpperCAmelCase_ ( self ) -> Matrix:
return Matrix(
[
                [self.get_minor(row , column) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def UpperCAmelCase_ ( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def UpperCAmelCase_ ( self ) -> Matrix:
lowerCAmelCase__ : str = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__A )
def UpperCAmelCase_ ( self ) -> Matrix:
lowerCAmelCase__ : Optional[Any] = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join([str(value) for value in self.rows[0]]) + ".]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ] )
            + "]"
        )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> None:
lowerCAmelCase__ : Optional[int] = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(__A ,__A ):
raise type_error
for value in row:
if not isinstance(__A ,(int, float) ):
raise type_error
if len(__A ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(__A )
else:
lowerCAmelCase__ : List[Any] = self.rows[0:position] + [row] + self.rows[position:]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> None:
lowerCAmelCase__ : Union[str, Any] = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(__A ,__A ):
raise type_error
for value in column:
if not isinstance(__A ,(int, float) ):
raise type_error
if len(__A ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
lowerCAmelCase__ : Union[str, Any] = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
lowerCAmelCase__ : Union[str, Any] = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self ,__UpperCAmelCase ) -> bool:
if not isinstance(__A ,__A ):
return NotImplemented
return self.rows == other.rows
def __ne__( self ,__UpperCAmelCase ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
def __add__( self ,__UpperCAmelCase ) -> Matrix:
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self ,__UpperCAmelCase ) -> Matrix:
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self ,__UpperCAmelCase ) -> Matrix:
if isinstance(__A ,(int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__A ,__A ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
                    [Matrix.dot_product(row , column) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self ,__UpperCAmelCase ) -> Matrix:
if not isinstance(__A ,__A ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
lowerCAmelCase__ : str = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product(cls , row: list[int] , column: list[int] ) -> int:
        return sum(row[i] * column[i] for i in range(len(row) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
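# A minimal usage sketch, assuming the obfuscated method names above correspond to
# the conventional Matrix API (determinant, inverse, identity, ...):
#
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()   # -2
#   (m * m).rows      # [[7, 10], [15, 22]]
#   (m ** 0).rows     # identity: [[1, 0], [0, 1]]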
| 718 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_lowerCAmelCase = datasets.logging.get_logger(__name__)
_lowerCAmelCase = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_lowerCAmelCase = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself         This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit           This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities      These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_lowerCAmelCase = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
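# Illustrative sketch (not part of the metric): how one CoNLL line from the example
# above splits into the columns listed in the description. Only columns 4, 5, 6 and
# the last one (word, POS, parse bit, coreference) are consumed by the scorer.
#
#   line = 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)'
#   cols = line.split()
#   cols[3], cols[4], cols[5], cols[-1]  # ('you', 'PRP', '(NP*)', '(116)')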
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="dummy_doc" ):
"""simple docstring"""
lowerCAmelCase__ : str = {doc: key_lines}
lowerCAmelCase__ : Tuple = {doc: sys_lines}
lowerCAmelCase__ : int = {}
lowerCAmelCase__ : Dict = 0
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Union[str, Any] = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ , lowerCAmelCase__ : Any = reader.get_doc_mentions(UpperCamelCase , key_doc_lines[doc] , UpperCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCAmelCase__ : Optional[int] = reader.set_annotated_parse_trees(UpperCamelCase , key_doc_lines[doc] , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = reader.get_doc_mentions(UpperCamelCase , sys_doc_lines[doc] , UpperCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCAmelCase__ : List[str] = reader.set_annotated_parse_trees(UpperCamelCase , key_doc_lines[doc] , UpperCamelCase , UpperCamelCase )
if remove_nested:
lowerCAmelCase__ , lowerCAmelCase__ : str = reader.remove_nested_coref_mentions(UpperCamelCase , UpperCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCAmelCase__ , lowerCAmelCase__ : Any = reader.remove_nested_coref_mentions(UpperCamelCase , UpperCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCAmelCase__ : Optional[int] = reader.get_mention_assignments(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = reader.get_mention_assignments(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
"""Number of resulting singleton clusters in the key """
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
"""files, respectively""" )
return doc_coref_infos
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : str = get_coref_infos(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : str = {}
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : str = 0
for name, metric in metrics:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = evaluator.evaluate_documents(UpperCamelCase , UpperCamelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {fa * 100:.2f}""" , )
if conll_subparts_num == 3:
lowerCAmelCase__ : Any = (conll / 3) * 100
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
lowerCAmelCase__ : List[Any] = line.split()[5]
if not parse_col == "-":
lowerCAmelCase__ : Union[str, Any] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) ,codebase_urls=["""https://github.com/ns-moosavi/coval"""] ,reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ) -> str:
lowerCAmelCase__ : List[str] = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
lowerCAmelCase__ : Optional[int] = util.check_gold_parse_annotation(__UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCAmelCase__ : Dict = evaluate(
key_lines=__UpperCAmelCase ,sys_lines=__UpperCAmelCase ,metrics=__UpperCAmelCase ,NP_only=__UpperCAmelCase ,remove_nested=__UpperCAmelCase ,keep_singletons=__UpperCAmelCase ,min_span=__UpperCAmelCase ,)
return score
| 160 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a : Union[str, Any] = logging.get_logger(__name__)
# General docstring
_a : Optional[Any] = 'RegNetConfig'
# Base docstring
_a : List[str] = 'facebook/regnet-y-040'
_a : List[str] = [1, 1_088, 7, 7]
# Image classification docstring
_a : Union[str, Any] = 'facebook/regnet-y-040'
_a : Dict = 'tabby, tabby cat'
_a : Union[str, Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( tf.keras.layers.Layer ):
def __init__( self , a__ , a__ = 3 , a__ = 1 , a__ = 1 , a__ = "relu" , **a__ , ):
super().__init__(**_snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_lowerCAmelCase : List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_lowerCAmelCase : Tuple = tf.keras.layers.ConvaD(
filters=_snake_case , kernel_size=_snake_case , strides=_snake_case , padding="""VALID""" , groups=_snake_case , use_bias=_snake_case , name="""convolution""" , )
_lowerCAmelCase : Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
_lowerCAmelCase : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = self.convolution(self.padding(_snake_case ) )
_lowerCAmelCase : Tuple = self.normalization(_snake_case )
_lowerCAmelCase : Optional[int] = self.activation(_snake_case )
return hidden_state
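# A quick sketch (assuming TensorFlow is installed) of why ZeroPadding2D(kernel_size // 2)
# followed by a VALID convolution matches SAME padding for odd kernels at stride 1,
# which is what the Colab notebook referenced above verifies:
#
#   import tensorflow as tf
#   x = tf.random.normal((1, 8, 8, 3))
#   same = tf.keras.layers.Conv2D(4, 3, padding="same")(x)
#   valid = tf.keras.layers.Conv2D(4, 3, padding="valid")(tf.keras.layers.ZeroPadding2D(1)(x))
#   same.shape == valid.shape  # TensorShape([1, 8, 8, 4]) for both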
class __A ( tf.keras.layers.Layer ):
def __init__( self , a__ , **a__ ):
super().__init__(**_snake_case )
_lowerCAmelCase : List[str] = config.num_channels
_lowerCAmelCase : Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def __A ( self , a__ ):
_lowerCAmelCase : Union[str, Any] = shape_list(_snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_lowerCAmelCase : List[Any] = tf.transpose(_snake_case , perm=(0, 2, 3, 1) )
_lowerCAmelCase : Tuple = self.embedder(_snake_case )
return hidden_state
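# Sketch of the layout change performed above: Keras Conv2D on CPU expects
# channels-last (NHWC), while the checkpoint convention is channels-first (NCHW):
#
#   import tensorflow as tf
#   nchw = tf.zeros((2, 3, 224, 224))             # (batch, channels, height, width)
#   nhwc = tf.transpose(nchw, perm=(0, 2, 3, 1))  # (batch, height, width, channels)
#   nhwc.shape                                    # TensorShape([2, 224, 224, 3])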
class __A ( tf.keras.layers.Layer ):
def __init__( self , a__ , a__ = 2 , **a__ ):
super().__init__(**_snake_case )
_lowerCAmelCase : Optional[int] = tf.keras.layers.ConvaD(
filters=_snake_case , kernel_size=1 , strides=_snake_case , use_bias=_snake_case , name="""convolution""" )
_lowerCAmelCase : Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
def __A ( self , a__ , a__ = False ):
return self.normalization(self.convolution(_snake_case ) , training=_snake_case )
class __A ( tf.keras.layers.Layer ):
def __init__( self , a__ , a__ , **a__ ):
super().__init__(**_snake_case )
_lowerCAmelCase : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_snake_case , name="""pooler""" )
_lowerCAmelCase : List[str] = [
tf.keras.layers.ConvaD(filters=_snake_case , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=_snake_case , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def __A ( self , a__ ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_lowerCAmelCase : Optional[Any] = self.pooler(_snake_case )
for layer_module in self.attention:
_lowerCAmelCase : Optional[int] = layer_module(_snake_case )
_lowerCAmelCase : Optional[int] = hidden_state * pooled
return hidden_state
class __A ( tf.keras.layers.Layer ):
def __init__( self , a__ , a__ , a__ , a__ = 1 , **a__ ):
super().__init__(**_snake_case )
_lowerCAmelCase : Union[str, Any] = in_channels != out_channels or stride != 1
_lowerCAmelCase : str = max(1 , out_channels // config.groups_width )
_lowerCAmelCase : List[str] = (
TFRegNetShortCut(_snake_case , stride=_snake_case , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_lowerCAmelCase : str = [
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
_snake_case , stride=_snake_case , groups=_snake_case , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=_snake_case , name="""layer.2""" ),
]
_lowerCAmelCase : Dict = ACTaFN[config.hidden_act]
def __A ( self , a__ ):
_lowerCAmelCase : Dict = hidden_state
for layer_module in self.layers:
_lowerCAmelCase : List[str] = layer_module(_snake_case )
_lowerCAmelCase : Any = self.shortcut(_snake_case )
hidden_state += residual
_lowerCAmelCase : int = self.activation(_snake_case )
return hidden_state
class __A ( tf.keras.layers.Layer ):
def __init__( self , a__ , a__ , a__ , a__ = 1 , **a__ ):
super().__init__(**_snake_case )
_lowerCAmelCase : Optional[int] = in_channels != out_channels or stride != 1
_lowerCAmelCase : List[Any] = max(1 , out_channels // config.groups_width )
_lowerCAmelCase : Any = (
TFRegNetShortCut(_snake_case , stride=_snake_case , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
_lowerCAmelCase : Optional[int] = [
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
_snake_case , stride=_snake_case , groups=_snake_case , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(_snake_case , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=_snake_case , name="""layer.3""" ),
]
_lowerCAmelCase : Optional[Any] = ACTaFN[config.hidden_act]
def __A ( self , a__ ):
_lowerCAmelCase : Any = hidden_state
for layer_module in self.layers:
_lowerCAmelCase : Optional[Any] = layer_module(_snake_case )
_lowerCAmelCase : List[str] = self.shortcut(_snake_case )
hidden_state += residual
_lowerCAmelCase : Optional[int] = self.activation(_snake_case )
return hidden_state
class __A ( tf.keras.layers.Layer ):
def __init__( self , a__ , a__ , a__ , a__ = 2 , a__ = 2 , **a__ ):
super().__init__(**_snake_case )
_lowerCAmelCase : Tuple = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
_lowerCAmelCase : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(_snake_case , _snake_case , _snake_case , stride=_snake_case , name="""layers.0""" ),
*[layer(_snake_case , _snake_case , _snake_case , name=F"layers.{i+1}" ) for i in range(depth - 1 )],
]
def __A ( self , a__ ):
for layer_module in self.layers:
_lowerCAmelCase : Tuple = layer_module(_snake_case )
return hidden_state
class __A ( tf.keras.layers.Layer ):
def __init__( self , a__ , **a__ ):
super().__init__(**_snake_case )
_lowerCAmelCase : Optional[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
_lowerCAmelCase : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_snake_case , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_snake_case , _snake_case , _snake_case , depth=_snake_case , name=F"stages.{i+1}" ) )
def __A ( self , a__ , a__ = False , a__ = True ):
_lowerCAmelCase : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase : Optional[Any] = hidden_states + (hidden_state,)
_lowerCAmelCase : Optional[int] = stage_module(_snake_case )
if output_hidden_states:
_lowerCAmelCase : Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_snake_case , hidden_states=_snake_case )
@keras_serializable
class __A ( tf.keras.layers.Layer ):
_UpperCamelCase : List[str] = RegNetConfig
def __init__( self , a__ , **a__ ):
super().__init__(**_snake_case )
_lowerCAmelCase : Any = config
_lowerCAmelCase : str = TFRegNetEmbeddings(_snake_case , name="""embedder""" )
_lowerCAmelCase : Tuple = TFRegNetEncoder(_snake_case , name="""encoder""" )
_lowerCAmelCase : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_snake_case , name="""pooler""" )
@unpack_inputs
def __A ( self , a__ , a__ = None , a__ = None , a__ = False , ):
_lowerCAmelCase : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Any = self.embedder(_snake_case , training=_snake_case )
_lowerCAmelCase : List[Any] = self.encoder(
_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , training=_snake_case )
_lowerCAmelCase : Optional[int] = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(_snake_case )
# Change to NCHW output format have uniformity in the modules
_lowerCAmelCase : Union[str, Any] = tf.transpose(_snake_case , perm=(0, 3, 1, 2) )
_lowerCAmelCase : Tuple = tf.transpose(_snake_case , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
            _lowerCAmelCase : List[Any] = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case , pooler_output=_snake_case , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __A ( _SCREAMING_SNAKE_CASE ):
_UpperCamelCase : Optional[Any] = RegNetConfig
_UpperCamelCase : List[str] = "regnet"
_UpperCamelCase : int = "pixel_values"
@property
def __A ( self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_a : List[str] = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_a : Any = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , _SCREAMING_SNAKE_CASE , )
class __A ( _SCREAMING_SNAKE_CASE ):
def __init__( self , a__ , *a__ , **a__ ):
super().__init__(_snake_case , *_snake_case , **_snake_case )
_lowerCAmelCase : Any = TFRegNetMainLayer(_snake_case , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __A ( self , a__ , a__ = None , a__ = None , a__=False , ):
_lowerCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : str = self.regnet(
pixel_values=_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , training=_snake_case , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _SCREAMING_SNAKE_CASE , )
class __A ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
def __init__( self , a__ , *a__ , **a__ ):
super().__init__(_snake_case , *_snake_case , **_snake_case )
_lowerCAmelCase : Dict = config.num_labels
_lowerCAmelCase : Any = TFRegNetMainLayer(_snake_case , name="""regnet""" )
# classification head
_lowerCAmelCase : Dict = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __A ( self , a__ = None , a__ = None , a__ = None , a__ = None , a__=False , ):
_lowerCAmelCase : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Dict = self.regnet(
_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , training=_snake_case )
_lowerCAmelCase : int = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase : Any = self.classifier[0](_snake_case )
_lowerCAmelCase : Optional[int] = self.classifier[1](_snake_case )
_lowerCAmelCase : int = None if labels is None else self.hf_compute_loss(labels=_snake_case , logits=_snake_case )
if not return_dict:
_lowerCAmelCase : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_snake_case , logits=_snake_case , hidden_states=outputs.hidden_states )
| 213 | """simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-letter signature for a word."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every known word sharing the given word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
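# Sketch of the signature-bucketing idea with a tiny in-memory word list
# (the real script reads words.txt):
#
#   >>> buckets = collections.defaultdict(list)
#   >>> for w in ['stop', 'pots', 'tops', 'cat']:
#   ...     buckets[''.join(sorted(w))].append(w)
#   >>> buckets['opst']
#   ['stop', 'pots', 'tops']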
| 159 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : Dict) -> List[Any]:
__snake_case : Any = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__snake_case : Optional[int] = dict(zip(_A , range(len(_A))))
__snake_case : Any = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__snake_case : List[Any] = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_60_00,
'return_attention_mask': False,
'do_normalize': True,
}
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
__snake_case : Any = os.path.join(self.tmpdirname , _A)
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(_A) + '\n')
with open(self.feature_extraction_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(_A) + '\n')
# load decoder from hub
__snake_case : Optional[Any] = 'hf-internal-testing/ngram-beam-search-decoder'
def _lowercase (self : Optional[int] , **_A : Optional[Any]) -> Optional[Any]:
__snake_case : int = self.add_kwargs_tokens_map.copy()
kwargs.update(_A)
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A)
def _lowercase (self : Tuple , **_A : Tuple) -> str:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A)
def _lowercase (self : str , **_A : Any) -> Union[str, Any]:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A)
def _lowercase (self : Tuple) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
def _lowercase (self : Dict) -> Optional[int]:
__snake_case : List[Any] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : List[Any] = self.get_decoder()
__snake_case : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A)
processor.save_pretrained(self.tmpdirname)
__snake_case : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer , _A)
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor , _A)
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels)
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A)
def _lowercase (self : Union[str, Any]) -> Any:
__snake_case : Dict = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder())
processor.save_pretrained(self.tmpdirname)
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3)
# decoder
self.assertEqual(processor.language_model.alpha , 5.0)
self.assertEqual(processor.language_model.beta , 3.0)
self.assertEqual(processor.language_model.score_boundary , -7.0)
self.assertEqual(processor.language_model.unk_score_offset , 3)
def _lowercase (self : int) -> int:
__snake_case : Optional[int] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'])
with self.assertRaisesRegex(_A , 'include'):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder())
def _lowercase (self : Union[str, Any]) -> int:
__snake_case : Dict = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A)
__snake_case : Any = floats_list((3, 10_00))
__snake_case : List[Any] = feature_extractor(_A , return_tensors='np')
__snake_case : Optional[int] = processor(_A , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def _lowercase (self : Optional[Any]) -> Optional[Any]:
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A)
__snake_case : str = 'This is a test string'
__snake_case : Tuple = processor(text=_A)
__snake_case : List[str] = tokenizer(_A)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _lowercase (self : Tuple , _A : Union[str, Any]=(2, 10, 16) , _A : int=77) -> Any:
np.random.seed(_A)
return np.random.rand(*_A)
def _lowercase (self : str) -> Union[str, Any]:
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : Dict = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A)
__snake_case : Union[str, Any] = self._get_dummy_logits(shape=(10, 16) , seed=13)
__snake_case : Optional[int] = processor.decode(_A)
__snake_case : Optional[Any] = decoder.decode_beams(_A)[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text)
self.assertEqual('</s> <s> </s>' , decoded_processor.text)
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score)
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score)
@parameterized.expand([[None], ['fork'], ['spawn']])
def _lowercase (self : List[str] , _A : Union[str, Any]) -> Tuple:
__snake_case : Dict = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Tuple = self.get_decoder()
__snake_case : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A)
__snake_case : List[str] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case : str = processor.batch_decode(_A)
else:
with get_context(_A).Pool() as pool:
__snake_case : Any = processor.batch_decode(_A , _A)
__snake_case : List[str] = list(_A)
with get_context('fork').Pool() as p:
__snake_case : Any = decoder.decode_beams_batch(_A , _A)
__snake_case , __snake_case , __snake_case : Optional[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0])
logit_scores_decoder.append(beams[0][-2])
lm_scores_decoder.append(beams[0][-1])
self.assertListEqual(_A , decoded_processor.text)
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text)
self.assertListEqual(_A , decoded_processor.logit_score)
self.assertListEqual(_A , decoded_processor.lm_score)
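        # Minimal sketch of the pool pattern the comments above describe (positional
        # pool argument, as exercised by this test):
        #
        #   processor = Wav2Vec2ProcessorWithLM.from_pretrained(...)  # load the LM first
        #   with get_context('fork').Pool() as pool:                  # then fork workers
        #       decoded = processor.batch_decode(logits, pool)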
def _lowercase (self : Dict) -> List[str]:
__snake_case : Any = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Dict = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A)
__snake_case : int = self._get_dummy_logits()
__snake_case : str = 15
__snake_case : List[str] = -20.0
__snake_case : int = -4.0
__snake_case : Dict = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
__snake_case : Tuple = decoded_processor_out.text
__snake_case : Dict = list(_A)
with get_context('fork').Pool() as pool:
__snake_case : Optional[Any] = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
__snake_case : Any = [d[0][0] for d in decoded_decoder_out]
__snake_case : Any = [d[0][2] for d in decoded_decoder_out]
__snake_case : int = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A)
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , _A)
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score))
self.assertTrue(np.allclose([-20.054, -18.447] , _A , atol=1E-3))
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score))
self.assertTrue(np.allclose([-15.554, -13.9_474] , _A , atol=1E-3))
def _lowercase (self : Dict) -> Dict:
__snake_case : Tuple = self.get_feature_extractor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A)
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : List[Any] = 2.0
__snake_case : Optional[int] = 5.0
__snake_case : Union[str, Any] = -20.0
__snake_case : Optional[int] = True
__snake_case : List[str] = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
__snake_case : Optional[int] = decoded_processor_out.text
__snake_case : Tuple = list(_A)
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context('fork').Pool() as pool:
__snake_case : List[str] = decoder.decode_beams_batch(
_A , _A , )
__snake_case : Tuple = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A)
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , _A)
__snake_case : Dict = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0)
self.assertEqual(lm_model.beta , 5.0)
self.assertEqual(lm_model.unk_score_offset , -20.0)
self.assertEqual(lm_model.score_boundary , _A)
def _lowercase (self : Union[str, Any]) -> Optional[int]:
__snake_case : str = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
__snake_case : Any = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
__snake_case : Optional[int] = os.listdir(_A)
__snake_case : Optional[Any] = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A)
def _lowercase (self : Any) -> str:
__snake_case : Any = snapshot_download('hf-internal-testing/processor_with_lm')
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained(_A)
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : List[str] = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
__snake_case : Dict = os.listdir(_A)
__snake_case : List[str] = os.listdir(_A)
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A)
def _lowercase (self : str) -> Any:
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
__snake_case : Union[str, Any] = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm')
__snake_case : Tuple = floats_list((3, 10_00))
__snake_case : Optional[Any] = processor_wavaveca(_A , return_tensors='np')
__snake_case : Optional[int] = processor_auto(_A , return_tensors='np')
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2)
__snake_case : List[Any] = self._get_dummy_logits()
__snake_case : Union[str, Any] = processor_wavaveca.batch_decode(_A)
__snake_case : List[Any] = processor_auto.batch_decode(_A)
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text)
def _lowercase (self : Optional[Any]) -> List[str]:
__snake_case : Optional[int] = self.get_feature_extractor()
__snake_case : Dict = self.get_tokenizer()
__snake_case : Tuple = self.get_decoder()
__snake_case : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A)
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def _lowercase (_A : Any , _A : str) -> str:
__snake_case : Any = [d[key] for d in offsets]
return retrieved_list
def _lowercase (self : List[Any]) -> Dict:
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
__snake_case : List[str] = self._get_dummy_logits()[0]
__snake_case : Any = processor.decode(_A , output_word_offsets=_A)
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys()) , 4)
self.assertTrue('text' in outputs)
self.assertTrue('word_offsets' in outputs)
self.assertTrue(isinstance(_A , _A))
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word')) , outputs.text)
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word') , ['<s>', '<s>', '</s>'])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset') , [0, 2, 4])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset') , [1, 3, 5])
def _lowercase (self : Any) -> Dict:
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
__snake_case : List[str] = self._get_dummy_logits()
__snake_case : List[str] = processor.batch_decode(_A , output_word_offsets=_A)
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys()) , 4)
self.assertTrue('text' in outputs)
self.assertTrue('word_offsets' in outputs)
self.assertTrue(isinstance(_A , _A))
        self.assertListEqual(
            [' '.join(self.get_from_offsets(o , 'word')) for o in outputs['word_offsets']] , outputs.text)
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word') , ['<s>', '<s>', '</s>'])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset') , [0, 2, 4])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset') , [1, 3, 5])
@slow
@require_torch
@require_torchaudio
def _lowercase (self : Any) -> Union[str, Any]:
import torch
__snake_case : int = load_dataset('common_voice' , 'en' , split='train' , streaming=_A)
__snake_case : int = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_60_00))
__snake_case : List[Any] = iter(_A)
__snake_case : List[Any] = next(_A)
__snake_case : Optional[int] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
__snake_case : List[Any] = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : Optional[int] = processor(sample['audio']['array'] , return_tensors='pt').input_values
with torch.no_grad():
__snake_case : Optional[Any] = model(_A).logits.cpu().numpy()
__snake_case : int = processor.decode(logits[0] , output_word_offsets=_A)
__snake_case : List[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case : Any = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case : Tuple = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(_A , 'word')) , _A)
self.assertEqual(' '.join(self.get_from_offsets(_A , 'word')) , output.text)
# output times
__snake_case : List[str] = torch.tensor(self.get_from_offsets(_A , 'start_time'))
__snake_case : List[str] = torch.tensor(self.get_from_offsets(_A , 'end_time'))
# fmt: off
__snake_case : Tuple = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599])
__snake_case : Tuple = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94])
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.01))
self.assertTrue(torch.allclose(_A , _A , atol=0.01))
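        # Sketch of the offset -> seconds conversion used above, assuming the usual
        # wav2vec2-base numbers: inputs_to_logits_ratio = 320 at a 16 kHz sampling
        # rate gives 320 / 16000 = 0.02 s per logits frame, so a word offset of 71
        # frames maps to roughly 71 * 0.02 = 1.42 s.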
| 192 | """simple docstring"""
def __UpperCAmelCase ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : str ) -> list[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = int(UpperCAmelCase_ )
# Initialize Result
__snake_case : int = []
# Traverse through all denomination
for denomination in reversed(UpperCAmelCase_ ):
# Find denominations
while int(UpperCAmelCase_ ) >= int(UpperCAmelCase_ ):
total_value -= int(UpperCAmelCase_ )
answer.append(UpperCAmelCase_ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
_a : Optional[int]= []
_a : Optional[int]= "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
_a : int= int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
_a : Optional[int]= input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
_a : Tuple= [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
_a : List[str]= input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f'''Following is minimal change for {value}: ''')
_a : List[Any]= find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
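# A minimal usage sketch of the greedy change-maker above (values illustrative).
# Note the greedy strategy is optimal for canonical coin systems such as the
# Indian denominations used here, but not for arbitrary denomination sets:
#
#   >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2_000], "987")
#   [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]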
| 192 | 1 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = XLMModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
        result = model(input_ids , langs=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = XLMWithLMHeadModel(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_xlm_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = XLMForQuestionAnsweringSimple(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids )
        outputs = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        result = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_xlm_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = XLMForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids )

        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )

        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )

        (total_loss , ) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )

        (total_loss , ) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_xlm_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = XLMForSequenceClassification(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids )
        result = model(input_ids , labels=sequence_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_xlm_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_xlm_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase : Optional[int] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_xlm_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs )

    def test_xlm_lm_head( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )

    def test_xlm_simple_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )

    def test_xlm_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs )

    def test_xlm_sequence_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )

    def test_xlm_token_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )

    def test_xlm_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
    def _check_attentions_for_generate(
        self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )

        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
    def _check_hidden_states_for_generate(
        self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )

        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
@slow
    def test_model_from_pretrained( self ):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class XLMModelLanguageGenerationTest( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def test_lm_generate_xlm_mlm_en_2048( self ):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device )  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
| 7 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
'''simple docstring'''
    def __init__( self ,img ,dst_width ,dst_height ) -> None:
        if dst_width < 0 or dst_height < 0:
            raise ValueError("""Destination width/height should be > 0""" )

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3) ,np.uint8 ) * 255
        )
    def process( self ) -> None:
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]

    def get_x( self ,x ) -> int:
        return int(self.ratio_x * x )

    def get_y( self ,y ) -> int:
        return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("""image_data/lena.jpg""", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
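# A dependency-free sketch of the same nearest-neighbour mapping (shapes assumed),
# handy for checking the class above without OpenCV:
#
#   import numpy as np
#
#   def resize_nn(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
#       ratio_x, ratio_y = img.shape[1] / dst_w, img.shape[0] / dst_h
#       ys = (np.arange(dst_h) * ratio_y).astype(int)   # source row per output row
#       xs = (np.arange(dst_w) * ratio_x).astype(int)   # source column per output column
#       return img[ys][:, xs]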
| 116 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask( tf.keras.layers.Layer ):
"""simple docstring"""
    def __init__( self ,vocab_size ,d_embed ,d_proj ,cutoffs ,div_val=1 ,keep_order=False ,**kwargs ):
        super().__init__(**kwargs )

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build( self ,input_shape ):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed) ,initializer='zeros' ,trainable=True ,name='cluster_weight' )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,) ,initializer='zeros' ,trainable=True ,name='cluster_bias' )

        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj) ,initializer='zeros' ,trainable=True ,name=f'out_projs_._{i}' ,)
                    self.out_projs.append(weight )
                else:
                    self.out_projs.append(None )
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) ,initializer='zeros' ,trainable=True ,name=f'out_layers_._{i}_._weight' ,)
                bias = self.add_weight(
                    shape=(self.vocab_size,) ,initializer='zeros' ,trainable=True ,name=f'out_layers_._{i}_._bias' ,)
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj) ,initializer='zeros' ,trainable=True ,name=f'out_projs_._{i}' )
                self.out_projs.append(weight )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) ,initializer='zeros' ,trainable=True ,name=f'out_layers_._{i}_._weight' ,)
                bias = self.add_weight(
                    shape=(r_idx - l_idx,) ,initializer='zeros' ,trainable=True ,name=f'out_layers_._{i}_._bias' ,)
                self.out_layers.append((weight, bias) )
        super().build(input_shape )
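    # Worked example of the adaptive projection sizes in build() above (hedged,
    # illustrative numbers): with d_embed = 1024 and div_val = 4, the per-cluster
    # embedding width d_emb_i = d_embed // div_val**i shrinks geometrically:
    # i = 0 -> 1024, i = 1 -> 256, i = 2 -> 64, ...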
    @staticmethod
    def _logit( x ,W ,b ,proj=None ):
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe' ,y ,proj )
        return tf.einsum('ibd,nd->ibn' ,y ,W ) + b

    @staticmethod
    def _gather_logprob( logprob ,target ):
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] ,dtype=target.dtype )
        idx = tf.stack([r, target] ,1 )
        return tf.gather_nd(logprob ,idx )
    def call( self ,hidden ,target ,return_mean=True ,training=False ):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden ,self.out_layers[0][0] ,self.out_layers[0][1] ,self.out_projs[0] )
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target ,logits=output )
            out = tf.nn.log_softmax(output ,axis=-1 )
        else:
            hidden_sizes = shape_list(hidden )
            out = []
            loss = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask )
                    cur_target = tf.boolean_mask(target ,mask ) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight] ,0 )
                    cur_b = tf.concat([cur_b, self.cluster_bias] ,0 )

                    head_logit = self._logit(hidden ,cur_W ,cur_b ,self.out_projs[0] )
                    head_logprob = tf.nn.log_softmax(head_logit )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob ,mask )
                        cur_logprob = self._gather_logprob(cur_head_logprob ,cur_target )
                else:
                    tail_logit = self._logit(hidden ,cur_W ,cur_b ,self.out_projs[i] )
                    tail_logprob = tf.nn.log_softmax(tail_logit )

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i )
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob ,mask )
                        cur_tail_logprob = tf.boolean_mask(tail_logprob ,mask )
                        cur_logprob = self._gather_logprob(cur_tail_logprob ,cur_target )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx ,-cur_logprob ,shape_list(loss ) )
            out = tf.concat(out ,axis=-1 )

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss )
            # Log the loss as a metric (we could log arbitrary metrics, including
            # different metrics for training and inference).
            self.add_metric(loss ,name=self.name ,aggregation='mean' if return_mean else '' )

        return out
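# A minimal sketch of the _gather_logprob indexing trick above (values assumed):
#
#   import tensorflow as tf
#
#   logprob = tf.math.log(tf.constant([[0.7, 0.3], [0.2, 0.8]]))
#   target = tf.constant([0, 1])
#   idx = tf.stack([tf.range(2), target], 1)   # [[0, 0], [1, 1]]
#   picked = tf.gather_nd(logprob, idx)        # -> [log(0.7), log(0.8)]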
| 252 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = 'encoder-decoder'
    is_composition = True
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ):
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
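# A hedged usage sketch, assuming the class matches transformers'
# EncoderDecoderConfig API (the 'bert' model type is illustrative):
#
#   from transformers import AutoConfig
#
#   enc = AutoConfig.for_model('bert')
#   dec = AutoConfig.for_model('bert')
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention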
| 252 | 1 |
'''simple docstring'''
_lowercase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowercase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowercase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 5 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
'''simple docstring'''
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.get_dummy_input()
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
    def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {'hidden_states': hidden_states}

        if include_temb:
            temb_channels = 1_28
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )

        if include_res_hidden_states_tuple:
            generator_a = torch.manual_seed(1 )
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape , generator=generator_a , device=device ),)

        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32) ).to(torch_device )

        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )

        return dummy_input
    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
            'temb_channels': 1_28,
        }
        if self.block_type == "up":
            init_dict['prev_output_channel'] = 32

        if self.block_type == "mid":
            init_dict.pop('out_channels' )

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output( self , expected_slice ):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict )

        if isinstance(output , Tuple ):
            output = output[0]

        self.assertEqual(output.shape , self.output_shape )

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
    def test_training( self ):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )

        if isinstance(output , Tuple ):
            output = output[0]

        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
loss.backward() | 329 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
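        # Worked example with the defaults above (hedged): num_mel_bins=16,
        # max_length=24, patch_size=2 and strides of 2 give
        #   frequency_out_dimension = (16 - 2) // 2 + 1 = 8
        #   time_out_dimension      = (24 - 2) // 2 + 1 = 12
        # so num_patches = 8 * 12 = 96 and seq_length = 96 + 2 = 98.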
    def prepare_config_and_inputs( self ):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        config = self.get_config()

        return config, input_values, labels
    def get_config( self ):
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model( self , config , input_values , labels ):
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'input_values': input_values}
return config, inputs_dict
@require_torch
class ASTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : str = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Dict = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
    def setUp( self ):
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
    def test_inputs_embeds( self ):
pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['input_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )

    audio , sampling_rate = torchaudio.load(filepath )

    return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_feature_extractor( self ):
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
    @slow
    def test_inference_audio_classification( self ):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(torch_device )

        feature_extractor = self.default_feature_extractor
        audio , sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 682 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    """simple docstring"""

    def __init__( self ):
        self.initialized = False

    def create_rag_retriever( self , config , question_encoder_tokenizer , generator_tokenizer , index ):
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True

    def init_retrieval( self ):
        self.retriever.index.init_index()

    def retrieve( self , question_hidden_states , n_docs ):
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs )
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever( RagRetriever ):
"""simple docstring"""
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , retrieval_workers , index=None ):
        if index is not None and index.is_initialized() and len(retrieval_workers ) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config , question_encoder_tokenizer , generator_tokenizer , index )
                    for worker in self.retrieval_workers
                ] )
    def init_retrieval( self ):
        logger.info('initializing retrieval' )

        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve( self , question_hidden_states , n_docs ):
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states , n_docs ) )
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
    @classmethod
    def get_tokenizers( cls , retriever_name_or_path , indexed_dataset=None , **kwargs ):
        return super(RagRayDistributedRetriever , cls ).get_tokenizers(retriever_name_or_path , indexed_dataset , **kwargs )
    @classmethod
    def from_pretrained( cls , retriever_name_or_path , actor_handles , indexed_dataset=None , **kwargs ):
        config = kwargs.pop('config' , None ) or RagConfig.from_pretrained(retriever_name_or_path , **kwargs )
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path , config=config )
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size , indexed_dataset )
        else:
            index = cls._build_index(config )
        return cls(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , retrieval_workers=actor_handles , index=index , )
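# A minimal sketch of the random-actor dispatch used in retrieve() above
# (the Worker class and values are illustrative, not part of this module):
#
#   import random
#   import ray
#
#   @ray.remote
#   class Worker:
#       def work(self, x):
#           return x * 2
#
#   workers = [Worker.remote() for _ in range(4)]
#   pick = workers[random.randint(0, len(workers) - 1)]
#   assert ray.get(pick.work.remote(21)) == 42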
| 682 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
A : Union[str, Any] = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
snake_case_ : int = StableDiffusionAttendAndExcitePipeline
snake_case_ : List[str] = False
snake_case_ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
snake_case_ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
snake_case_ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case_ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict ) -> int:
'''simple docstring'''
super().setUpClass()
        torch.use_deterministic_algorithms(False )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] ) -> int:
'''simple docstring'''
super().tearDownClass()
        torch.use_deterministic_algorithms(True )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
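    # Hedged note: in the CLIP tokenization of "a cat and a frog" (index 0 being
    # the BOS token), token_indices [2, 5] point at "cat" and "frog" -- the tokens
    # whose cross-attention maps Attend-and-Excite iteratively maximizes.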
    def test_inference( self ):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5e-4 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class UpperCamelCase( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple ) -> int:
'''simple docstring'''
super().setUpClass()
        torch.use_deterministic_algorithms(False )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int ) -> List[Any]:
'''simple docstring'''
super().tearDownClass()
        torch.use_deterministic_algorithms(True )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_attend_and_excite_fp16( self ):
        '''simple docstring'''
        generator = torch.manual_seed(5_1 )

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.to("cuda" )

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt , token_indices=token_indices , guidance_scale=7.5 , generator=generator , num_inference_steps=5 , max_iter_to_alter=5 , output_type="numpy" , ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 371 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )

    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize( self , text : str ) -> List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )

        return new_pieces
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def _decode( self , token_ids : List[int] , skip_special_tokens : bool = False , clean_up_tokenization_spaces : bool = None , spaces_between_special_tokens : bool = True , **kwargs , ) -> str:
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer" , False )

        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts )

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (out_vocab_file,)
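# Hedged illustration of the XLNet layout produced by
# build_inputs_with_special_tokens above (token strings shown for readability):
#
#   single sequence:   A <sep> <cls>
#   pair of sequences: A <sep> B <sep> <cls>
#
# Unlike BERT-style tokenizers, the classification token goes at the END, which
# is also why create_token_type_ids_from_sequences assigns segment id 2 to <cls>.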
| 371 | 1 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__( self , conv1_get , size_p1 , bp_num1 , bp_num2 , bp_num3 , rate_w=0.2 , rate_t=0.2 ):
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0] , self.conv1[0] ) + 0.5 )
            for i in range(self.conv1[1] )
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3 , self.num_bp2 ) + 0.5 )
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2 , self.num_bp1 ) + 0.5 )
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1] ) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2 ) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3 ) + 1
    def save_model( self , save_path ):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path , "wb" ) as f:
            pickle.dump(model_dic , f )

        print(F'Model saved: {save_path}' )
    @classmethod
    def read_model( cls , model_path ):
        # read saved model
        with open(model_path , "rb" ) as f:
            model_dic = pickle.load(f )  # noqa: S301

        conv_get = model_dic.get("conv1" )
        conv_get.append(model_dic.get("step_conv1" ) )
        size_p1 = model_dic.get("size_pooling1" )
        bp1 = model_dic.get("num_bp1" )
        bp2 = model_dic.get("num_bp2" )
        bp3 = model_dic.get("num_bp3" )
        rate_w = model_dic.get("rate_weight" )
        rate_t = model_dic.get("rate_thre" )
        # create model instance
        conv_ins = CNN(conv_get , size_p1 , bp1 , bp2 , bp3 , rate_w , rate_t )
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1" )
        conv_ins.wkj = model_dic.get("wkj" )
        conv_ins.vji = model_dic.get("vji" )
        conv_ins.thre_conv1 = model_dic.get("thre_conv1" )
        conv_ins.thre_bp2 = model_dic.get("thre_bp2" )
        conv_ins.thre_bp3 = model_dic.get("thre_bp3" )
        return conv_ins
def lowerCamelCase__ ( self , snake_case_ ):
return 1 / (1 + np.exp(-1 * x ))
def lowerCamelCase__ ( self , snake_case_ ):
return round(snake_case_ , 3 )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
# convolution process
_snake_case : Any = convs[0]
_snake_case : Optional[Any] = convs[1]
_snake_case : int = np.shape(snake_case_ )[0]
# get the data slice of original image data, data_focus
_snake_case : int = []
for i_focus in range(0 , size_data - size_conv + 1 , snake_case_ ):
for j_focus in range(0 , size_data - size_conv + 1 , snake_case_ ):
_snake_case : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(snake_case_ )
# calculate the feature map of every single kernel, and saved as list of matrix
_snake_case : Optional[int] = []
_snake_case : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(snake_case_ ):
_snake_case : int = []
for i_focus in range(len(snake_case_ ) ):
_snake_case : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(snake_case_ ) )
_snake_case : int = np.asmatrix(snake_case_ ).reshape(
snake_case_ , snake_case_ )
data_featuremap.append(snake_case_ )
# expanding the data slice to One dimenssion
_snake_case : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(snake_case_ ) )
_snake_case : Optional[int] = np.asarray(snake_case_ )
return focus_list, data_featuremap
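    # Shape check (illustrative): a 12x12 input with 3x3 kernels and conv_step=2 gives
    # int((12 - 3) / 2 + 1) = 5, i.e. each kernel produces a 5x5 feature map, while
    # focus_list stacks every 3x3 window slice for later use in backpropagation.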
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_="average_pool" ):
# pooling process
_snake_case : List[str] = len(featuremaps[0] )
_snake_case : List[str] = int(size_map / size_pooling )
_snake_case : Optional[Any] = []
for i_map in range(len(snake_case_ ) ):
_snake_case : Union[str, Any] = featuremaps[i_map]
_snake_case : Union[str, Any] = []
for i_focus in range(0 , snake_case_ , snake_case_ ):
for j_focus in range(0 , snake_case_ , snake_case_ ):
_snake_case : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(snake_case_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(snake_case_ ) )
_snake_case : List[Any] = np.asmatrix(snake_case_ ).reshape(snake_case_ , snake_case_ )
featuremap_pooled.append(snake_case_ )
return featuremap_pooled
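    # Pooling example (illustrative): 6x6 feature maps with size_pooling=2 become 3x3
    # maps; each output cell is the mean (or the max, for "max_pooling") of one 2x2 window.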
def lowerCamelCase__ ( self , snake_case_ ):
# expanding three dimension data to one dimension list
_snake_case : List[str] = []
for i in range(len(snake_case_ ) ):
_snake_case : Optional[int] = np.shape(data[i] )
_snake_case : Dict = data[i].reshape(1 , shapes[0] * shapes[1] )
_snake_case : Any = data_listed.getA().tolist()[0]
data_expanded.extend(snake_case_ )
_snake_case : int = np.asarray(snake_case_ )
return data_expanded
def lowerCamelCase__ ( self , snake_case_ ):
# expanding matrix to one dimension list
_snake_case : Union[str, Any] = np.asarray(snake_case_ )
_snake_case : str = np.shape(snake_case_ )
_snake_case : int = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : str = []
_snake_case : Union[str, Any] = 0
for i_map in range(snake_case_ ):
_snake_case : Optional[int] = np.ones((size_map, size_map) )
for i in range(0 , snake_case_ , snake_case_ ):
for j in range(0 , snake_case_ , snake_case_ ):
_snake_case : Optional[int] = pd_pool[
i_pool
]
_snake_case : Dict = i_pool + 1
_snake_case : Optional[Any] = np.multiply(
snake_case_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(snake_case_ )
return pd_all
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=bool ):
# model traning
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(snake_case_ )) )
print((" - - Shape: Teach_Data ", np.shape(snake_case_ )) )
_snake_case : Any = 0
_snake_case : List[str] = []
_snake_case : int = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
_snake_case : List[Any] = 0
print(F'-------------Learning Time {rp}--------------' )
for p in range(len(snake_case_ ) ):
# print('------------Learning Image: %d--------------'%p)
_snake_case : Dict = np.asmatrix(datas_train[p] )
_snake_case : Optional[Any] = np.asarray(datas_teach[p] )
_snake_case , _snake_case : Tuple = self.convolute(
snake_case_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_snake_case : List[str] = self.pooling(snake_case_ , self.size_poolinga )
_snake_case : Union[str, Any] = np.shape(snake_case_ )
_snake_case : Tuple = self._expand(snake_case_ )
_snake_case : Optional[Any] = data_bp_input
_snake_case : Dict = np.dot(snake_case_ , self.vji.T ) - self.thre_bpa
_snake_case : Any = self.sig(snake_case_ )
_snake_case : List[Any] = np.dot(snake_case_ , self.wkj.T ) - self.thre_bpa
_snake_case : List[str] = self.sig(snake_case_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_snake_case : List[str] = np.multiply(
(data_teach - bp_outa) , np.multiply(snake_case_ , (1 - bp_outa) ) )
_snake_case : Dict = np.multiply(
np.dot(snake_case_ , self.wkj ) , np.multiply(snake_case_ , (1 - bp_outa) ) )
_snake_case : str = np.dot(snake_case_ , self.vji )
_snake_case : int = pd_i_all / (self.size_poolinga * self.size_poolinga)
_snake_case : List[Any] = pd_conva_pooled.T.getA().tolist()
_snake_case : int = self._calculate_gradient_from_pool(
snake_case_ , snake_case_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
_snake_case : int = self._expand_mat(pd_conva_all[k_conv] )
_snake_case : Union[str, Any] = self.rate_weight * np.dot(snake_case_ , snake_case_ )
_snake_case : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
_snake_case : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
_snake_case : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_snake_case : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_snake_case : Dict = self.thre_bpa - pd_k_all * self.rate_thre
_snake_case : List[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_snake_case : Any = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_snake_case : int = rp + 1
_snake_case : Tuple = error_count / patterns
all_mse.append(snake_case_ )
        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(all_mse , "+-" )
            plt.plot(yplot , "r--" )
            plt.xlabel("Learning Times" )
            plt.ylabel("All_mse" )
            plt.grid(True , alpha=0.5 )
            plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, F' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def lowerCamelCase__ ( self , snake_case_ ):
# model predict
_snake_case : Dict = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(snake_case_ )) )
for p in range(len(snake_case_ ) ):
_snake_case : Union[str, Any] = np.asmatrix(datas_test[p] )
_snake_case , _snake_case : Dict = self.convolute(
snake_case_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_snake_case : int = self.pooling(snake_case_ , self.size_poolinga )
_snake_case : str = self._expand(snake_case_ )
_snake_case : Tuple = data_bp_input
_snake_case : Dict = bp_outa * self.vji.T - self.thre_bpa
_snake_case : List[Any] = self.sig(snake_case_ )
_snake_case : Union[str, Any] = bp_outa * self.wkj.T - self.thre_bpa
_snake_case : int = self.sig(snake_case_ )
produce_out.extend(bp_outa.getA().tolist() )
_snake_case : Optional[int] = [list(map(self.do_round , snake_case_ ) ) for each in produce_out]
return np.asarray(snake_case_ )
def lowerCamelCase__ ( self , snake_case_ ):
# return the data of image after convoluting process so we can check it out
_snake_case : Any = np.asmatrix(snake_case_ )
_snake_case , _snake_case : Tuple = self.convolute(
snake_case_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_snake_case : List[str] = self.pooling(snake_case_ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
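    # Minimal usage sketch (an assumption -- the constructor argument order below follows
    # the positional order used in the class body: [kernel_size, n_kernels, step],
    # pooling size, three BP layer widths, then the two learning rates). With a 10x10
    # input, 3x3 kernels at step 1 give 8x8 feature maps, pooled 2x2 down to 4x4, so the
    # first BP layer width is 4 maps * 4 * 4 = 64:
    #
    #   cnn = CNN([3, 4, 1], 2, 64, 20, 9, 0.2, 0.2)
    #   data_conved, data_pooled = cnn.convolution(np.random.rand(10, 10))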
| 87 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( PipelineTesterMixin , unittest.TestCase):
__lowercase : Any = TextToVideoSDPipeline
__lowercase : str = TEXT_TO_IMAGE_PARAMS
__lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__lowercase : Optional[int] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_snake_case : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
_snake_case : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_snake_case : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_snake_case : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_snake_case : Tuple = CLIPTextModel(snake_case_ )
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_snake_case : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ):
if str(snake_case_ ).startswith("mps" ):
_snake_case : str = torch.manual_seed(snake_case_ )
else:
_snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_snake_case : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def lowerCamelCase__ ( self ):
_snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator
_snake_case : Optional[Any] = self.get_dummy_components()
_snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ )
_snake_case : List[str] = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_snake_case : int = self.get_dummy_inputs(snake_case_ )
_snake_case : Union[str, Any] = "np"
_snake_case : Dict = sd_pipe(**snake_case_ ).frames
_snake_case : Any = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase):
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
_snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
_snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_snake_case : Tuple = pipe.to("cuda" )
_snake_case : List[Any] = "Spiderman is surfing"
_snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
_snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames
_snake_case : int = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def lowerCamelCase__ ( self ):
_snake_case : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
_snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
_snake_case : int = pipe.to("cuda" )
_snake_case : Any = "Spiderman is surfing"
_snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 )
_snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames
_snake_case : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
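# Running these tests (sketch -- the exact test-file path is an assumption):
#   pytest tests/pipelines/text_to_video/ -k "TextToVideoSDPipeline"
# The fast class above runs on tiny random weights; the @slow class downloads
# damo-vilab/text-to-video-ms-1.7b and needs a CUDA device.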
| 87 | 1 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowercase : Any = logging.get_logger("""transformers.models.speecht5""")
def load_weights( checkpoint , hf_model , config ):
    # NOTE: the left-hand-side module attributes below are reconstructed from the
    # checkpoint key layout and are an assumption about the SpeechTaHifiGan module names.
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["model"]["generator"] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    # the `mean`/`scale` buffer names are an assumption based on how the stats file is used here
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowercase : str = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
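    # Example invocation (sketch -- file names are placeholders, flags match the parser above):
    #   python convert_hifigan.py \
    #       --checkpoint_path ./generator.pt \
    #       --stats_path ./stats.npy \
    #       --pytorch_dump_folder_path ./speecht5_hifigan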
| 336 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
    def __init__( self ,parent ,batch_size=13 ,num_channels=3 ,image_size=224 ,min_resolution=30 ,max_resolution=400 ,do_resize=True ,size=None ,do_normalize=True ,image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ,):
        '''simple docstring'''
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
lowercase__ = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
    def UpperCamelCase_ ( self : List[str] ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,'image_mean' ) )
        self.assertTrue(hasattr(image_processing ,'image_std' ) )
        self.assertTrue(hasattr(image_processing ,'do_normalize' ) )
        self.assertTrue(hasattr(image_processing ,'do_resize' ) )
        self.assertTrue(hasattr(image_processing ,'size' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
        # Initialize image_processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester ,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image ,Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) ,)
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
        # Initialize image_processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester ,equal_resolution=False ,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) ,)
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
        # Initialize image_processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester ,equal_resolution=False ,torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) ,)
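# With the defaults above (num_channels=3, size={'height': 18, 'width': 18}), every
# non-batched call returns pixel_values of shape (1, 3, 18, 18) and every batched call
# (batch_size, 3, 18, 18), regardless of the input resolution.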
| 195 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 715 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817E-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8  # unit of c : m * s^-1
def _A ( force , area , distance ) -> dict[str, float]:
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            2_4_0 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (2_4_0 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
        ) ** (1 / 4)
        return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
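# Worked example (illustrative): two 1 m^2 plates separated by 1 micrometre.
#   _A(force=0, area=1.0, distance=1e-6)  ->  {'force': ~1.3e-3}
# which matches the well-known Casimir pressure of roughly 1.3 mPa at 1 um separation.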
| 425 | 0 |
def __lowercase ( x_points , y_points , xa ) -> list:
    '''simple docstring'''
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
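# Example (illustrative): for the linear data y = x + 5 sampled at x = (1, 2, 3, 4, 6),
#   __lowercase((1, 2, 3, 4, 6), (6, 7, 8, 9, 11), 5)[0] == 10.0
# and the second return value is the full Neville table q.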
| 321 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case ( __snake_case ):
"""simple docstring"""
__lowerCAmelCase = ["""image_processor""", """tokenizer"""]
__lowerCAmelCase = """LayoutLMv2ImageProcessor"""
__lowerCAmelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase_ , )
__lowercase = kwargs.pop("feature_extractor" )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
__lowercase = self.image_processor(images=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features["words"]
__lowercase = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
# add pixel values
__lowercase = features.pop("pixel_values" )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(lowerCAmelCase_ , encoded_inputs["overflow_to_sample_mapping"] )
__lowercase = images
return encoded_inputs
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f''' {len(lowerCAmelCase_ )} and {len(lowerCAmelCase_ )}''' )
return images_with_overflow
def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def snake_case__ ( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def snake_case__ ( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCAmelCase_ , )
return self.image_processor_class
@property
def snake_case__ ( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCAmelCase_ , )
return self.image_processor
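# Illustrative usage (the checkpoint name is an assumption):
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")
# With apply_ocr=True (the default) the image processor supplies words and boxes itself;
# pass boxes/word_labels only when it was initialized with apply_ocr=False, as enforced above.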
| 321 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
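# Lazy-import behaviour (sketch): the heavy torch-backed module is only loaded on first access.
#   from transformers.models.lilt import LiltConfig   # cheap: configuration only
#   from transformers.models.lilt import LiltModel    # triggers the modeling_lilt import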
| 109 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A = logging.get_logger(__name__)
A = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
A = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
A = {
'abeja/gpt-neox-japanese-2.7b': 2_0_4_8,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
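# Vocab line format handled above (illustrative): a line such as "こんにちは,コンニチハ" maps
# both surface forms to one id, while a line that is literally "," keeps the comma itself
# as a single token rather than being split.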
class UpperCAmelCase__ ( UpperCamelCase ):
lowerCAmelCase_ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : str = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Optional[Any]="<|endoftext|>" , snake_case : List[str]="<|endoftext|>" , snake_case : Any="<|startoftext|>" , snake_case : Any="<|endoftext|>" , snake_case : Tuple=False , **snake_case : List[str] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
unk_token=snake_case , pad_token=snake_case , bos_token=snake_case , eos_token=snake_case , do_clean_text=snake_case , **snake_case , )
if not os.path.isfile(snake_case ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
A = do_clean_text
A , A , A , A = load_vocab_and_emoji(snake_case , snake_case )
A = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
return len(self.raw_vocab )
def A_ ( self : str ) -> str:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def A_ ( self : List[str] , snake_case : Tuple ) -> Optional[Any]:
'''simple docstring'''
return self.subword_tokenizer.tokenize(snake_case , clean=self.do_clean_text )
def A_ ( self : int , snake_case : Optional[int] ) -> Dict:
'''simple docstring'''
return self.vocab.get(snake_case , self.vocab.get(self.unk_token ) )
def A_ ( self : int , snake_case : List[Any] ) -> Tuple:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(snake_case )
def A_ ( self : str , snake_case : int ) -> Optional[Any]:
'''simple docstring'''
A = ''.join(snake_case ).strip()
return out_string
def A_ ( self : Optional[int] , snake_case : "Conversation" ) -> List[int]:
'''simple docstring'''
A = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case , add_special_tokens=snake_case ) + [self.eos_token_id] )
if len(snake_case ) > self.model_max_length:
A = input_ids[-self.model_max_length :]
return input_ids
def A_ ( self : int , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
A = 0
if os.path.isdir(snake_case ):
A = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
A = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
A = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
A = token_index
writer.write(','.join(snake_case ) + '\n' )
index += 1
with open(snake_case , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , snake_case )
return vocab_file, emoji_file
class UpperCAmelCase__ ( UpperCamelCase ):
def __init__( self : str , snake_case : Dict , snake_case : Optional[Any] , snake_case : List[Any] ) -> int:
'''simple docstring'''
A = vocab # same as swe
A = ids_to_tokens # same as bpe
A = emoji
A = np.max([len(snake_case ) for w in self.vocab.keys()] )
A = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
A = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
A = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
A = re.compile(
r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
A = re.compile(
r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
A = re.compile(
r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
A = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
A = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
A = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : List[str] ) -> List[str]:
'''simple docstring'''
return len(self.ids_to_tokens )
def A_ ( self : Tuple , snake_case : Any ) -> Optional[int]:
'''simple docstring'''
A = self.content_repattera.sub('<URL>' , snake_case )
A = self.content_repattera.sub('<EMAIL>' , snake_case )
A = self.content_repattera.sub('<TEL>' , snake_case )
A = self.content_repattera.sub('<DATE>' , snake_case )
A = self.content_repattera.sub('<DATE>' , snake_case )
A = self.content_repattera.sub('<PRICE>' , snake_case )
A = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
A = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def A_ ( self : Any , snake_case : int , snake_case : Any=False ) -> Any:
'''simple docstring'''
A = text.replace(' ' , '<SP>' )
A = text.replace(' ' , '<SP>' )
A = text.replace('\r\n' , '<BR>' )
A = text.replace('\n' , '<BR>' )
A = text.replace('\r' , '<BR>' )
A = text.replace('\t' , '<TAB>' )
A = text.replace('—' , 'ー' )
A = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
A = text.replace(snake_case , snake_case )
if clean:
A = self.clean_text(snake_case )
def check_simbol(snake_case : Union[str, Any] ):
A = x.encode()
if len(snake_case ) == 1 and len(snake_case ) == 2:
A = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(snake_case : List[Any] ):
A = x.encode()
if len(snake_case ) == 1 and len(snake_case ) == 3:
A = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe2_8080 and c <= 0xe2_b07f:
return True
return False
A = 0
A = []
while pos < len(snake_case ):
A = min(len(snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
A = [] # (token_id, token, pos)
for e in range(snake_case , snake_case , -1 ):
A = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(snake_case ) > 2:
A = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(snake_case ) > 0:
# the smallest token_id is adopted
A , A , A = sorted(snake_case , key=lambda snake_case : x[0] )[0]
result.append(snake_case )
A = e
else:
A = pos + 1
A = text[pos:end]
if check_simbol(snake_case ):
result.append('<KIGOU>' )
elif checkuae(snake_case ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
A = end
return result
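    # Byte fallback (illustrative): a character missing from the vocab is emitted as its
    # UTF-8 bytes, e.g. "語" (0xE8 0xAA 0x9E) -> ["<|byte232|>", "<|byte170|>", "<|byte158|>"].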
def A_ ( self : List[str] , snake_case : Optional[Any] , snake_case : Optional[Any]="\n" ) -> List[Any]:
'''simple docstring'''
A = []
A = []
A = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case ) > 0:
words.append(bytearray(snake_case ).decode('utf-8' , errors='replace' ) )
A = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case )
if len(snake_case ) > 0:
words.append(bytearray(snake_case ).decode('utf-8' , errors='replace' ) )
A = ''.join(snake_case )
return text
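# Illustrative round trip (sketch -- assumes the abeja/gpt-neox-japanese-2.7b checkpoint
# referenced in the file maps above; the public class name is an assumption):
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("吾輩は猫である")["input_ids"]
#   text = tokenizer.decode(ids)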
| 109 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
_lowerCamelCase : Optional[int] = pytest.mark.integration
_lowerCamelCase : Dict = {'comet'}
_lowerCamelCase : int = importlib.util.find_spec('fairseq') is not None
_lowerCamelCase : Optional[Any] = {'code_eval'}
_lowerCamelCase : Dict = os.name == 'nt'
_lowerCamelCase : List[str] = {'bertscore', 'frugalscore', 'perplexity'}
_lowerCamelCase : Optional[int] = importlib.util.find_spec('transformers') is not None
def _lowerCAmelCase ( __magic_name__ :str ):
@wraps(lowerCamelCase_ )
def wrapper(self :Any , __magic_name__ :Union[str, Any] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('''"test requires Fairseq"''' )
else:
test_case(self , lowerCamelCase_ )
return wrapper
def _lowerCAmelCase ( __magic_name__ :List[str] ):
@wraps(lowerCamelCase_ )
def wrapper(self :List[str] , __magic_name__ :Optional[Any] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('''"test requires transformers"''' )
else:
test_case(self , lowerCamelCase_ )
return wrapper
def _lowerCAmelCase ( __magic_name__ :Union[str, Any] ):
@wraps(lowerCamelCase_ )
def wrapper(self :Union[str, Any] , __magic_name__ :Union[str, Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('''"test not supported on Windows"''' )
else:
test_case(self , lowerCamelCase_ )
return wrapper
def _lowerCAmelCase ( ):
UpperCAmelCase_ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@local
class snake_case__ ( parameterized.TestCase ):
'''simple docstring'''
__A = {}
__A = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
def UpperCamelCase ( self : str , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ = '''[...]'''
UpperCAmelCase_ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , _lowerCamelCase ) ).module_path )
UpperCAmelCase_ = datasets.load.import_main_class(metric_module.__name__ , dataset=_lowerCamelCase )
# check parameters
UpperCAmelCase_ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(_lowerCamelCase , metric_module.__name__ ):
with self.use_local_metrics():
try:
UpperCAmelCase_ = doctest.testmod(_lowerCamelCase , verbose=_lowerCamelCase , raise_on_error=_lowerCamelCase )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def UpperCamelCase ( self : int , lowerCAmelCase_ : Tuple ) -> Optional[int]:
UpperCAmelCase_ = '''[...]'''
UpperCAmelCase_ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , _lowerCamelCase ) ).module_path )
# run doctest
with self.use_local_metrics():
UpperCAmelCase_ = doctest.testmod(_lowerCamelCase , verbose=_lowerCamelCase , raise_on_error=_lowerCamelCase )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def UpperCamelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> Optional[Any]:
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](_lowerCamelCase ):
yield
else:
yield
@contextmanager
def UpperCamelCase ( self : Union[str, Any] ) -> Any:
        def load_local_metric(metric_name , *args , **kwargs ):
            return load_metric(os.path.join('metrics' , metric_name ) , *args , **kwargs )
        with patch('datasets.load_metric' ) as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
yield
@classmethod
    def UpperCamelCase ( cls , metric_name ):
        def wrapper(patcher ):
            patcher = contextmanager(patcher )
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def _lowerCAmelCase ( __magic_name__ :Optional[int] ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags
    class snake_case__ ( Predictor ):
'''simple docstring'''
def UpperCamelCase ( self : Any , lowerCAmelCase_ : int ) -> Optional[int]:
assert len(input_dict['''input_ids'''] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def _lowerCAmelCase ( __magic_name__ :Union[str, Any] ):
import torch
def bert_cos_score_idf(__magic_name__ :List[str] , __magic_name__ :str , *__magic_name__ :int , **__magic_name__ :Optional[int] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(lowerCamelCase_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('''bert_score.scorer.get_model''' ), patch(
'''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def _lowerCAmelCase ( __magic_name__ :Dict ):
def load_from_checkpoint(__magic_name__ :Union[str, Any] ):
class snake_case__ :
'''simple docstring'''
def UpperCamelCase ( self : Optional[Any] , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : int ) -> Optional[int]:
assert len(_lowerCamelCase ) == 2
UpperCAmelCase_ = [0.19, 0.92]
return scores, sum(_lowerCamelCase ) / len(_lowerCamelCase )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch('''comet.download_model''' ) as mock_download_model:
        mock_download_model.return_value = None
with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
yield
def _lowerCAmelCase ( ):
UpperCAmelCase_ = load_metric(os.path.join('''metrics''' , '''seqeval''' ) )
UpperCAmelCase_ = '''ERROR'''
UpperCAmelCase_ = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(lowerCamelCase_ , match=re.escape(lowerCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=lowerCamelCase_ )
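# The patchers above keep every local-metric doctest offline and deterministic: bleurt's
# TF predictor, bertscore's model download and forward pass, and comet's checkpoint
# loading are all replaced by stubs before doctest.testmod() runs over each metric module.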
| 121 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=13 ,_lowerCamelCase=7 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=99 ,_lowerCamelCase=64 ,_lowerCamelCase=5 ,_lowerCamelCase=4 ,_lowerCamelCase=37 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=512 ,_lowerCamelCase=16 ,_lowerCamelCase=2 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=3 ,_lowerCamelCase=4 ,_lowerCamelCase=None ,) -> Dict:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = vocab_size - 1
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_lowerCamelCase ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,)
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs()
__lowercase = True
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = GPTNeoXModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = True
__lowercase = GPTNeoXModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = GPTNeoXForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = GPTNeoXForQuestionAnswering(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = GPTNeoXForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = GPTNeoXForTokenClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = True
__lowercase = GPTNeoXForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# first forward pass
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,use_cache=_lowerCamelCase )
__lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) ,config.vocab_size )
__lowercase = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
__lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
__lowercase = torch.cat([input_mask, next_mask] ,dim=-1 )
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,output_hidden_states=_lowerCamelCase )
__lowercase = output_from_no_past['''hidden_states'''][0]
__lowercase = model(
_lowerCamelCase ,attention_mask=_lowerCamelCase ,past_key_values=_lowerCamelCase ,output_hidden_states=_lowerCamelCase ,)['''hidden_states'''][0]
# select random slice
__lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-3 ) )
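        # Equivalence check (what the assertions above verify): decoding 3 new tokens with
        # past_key_values must match re-running the full concatenated sequence without the
        # cache, up to atol=1e-3, on a randomly chosen hidden-state column.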
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = GPTNeoXModelTester(self )
__lowercase = ConfigTester(self ,config_class=_lowerCamelCase ,hidden_size=64 ,num_attention_heads=8 )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _UpperCAmelCase (self ,scaling_type ) -> Union[str, Any]:
    '''simple docstring'''
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()
    short_input = ids_tensor([1, 10] ,config.vocab_size )
    long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
    set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
    original_model = GPTNeoXModel(config )
    original_model.to(torch_device )
    original_model.eval()
    original_short_output = original_model(short_input ).last_hidden_state
    original_long_output = original_model(long_input ).last_hidden_state
    set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
    config.rope_scaling = {'''type''': scaling_type, '''factor''': 10.0}
    scaled_model = GPTNeoXModel(config )
    scaled_model.to(torch_device )
    scaled_model.eval()
    scaled_short_output = scaled_model(short_input ).last_hidden_state
    scaled_long_output = scaled_model(long_input ).last_hidden_state
    # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
    # maximum sequence length, so the outputs for the short input should match.
    if scaling_type == "dynamic":
        self.assertTrue(torch.allclose(original_short_output ,scaled_short_output ,atol=1E-5 ) )
    else:
        self.assertFalse(torch.allclose(original_short_output ,scaled_short_output ,atol=1E-5 ) )
    # The output should be different for long inputs
    self.assertFalse(torch.allclose(original_long_output ,scaled_long_output ,atol=1E-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
__lowercase = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_lowerCamelCase )
__lowercase = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_lowerCamelCase )
# The hub repo was updated on 2023-04-04, and the checkpoint now produces these (poor) outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__lowercase = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
__lowercase = model.generate(**_lowerCamelCase ,do_sample=_lowerCamelCase ,max_new_tokens=20 )
__lowercase = tokenizer.batch_decode(_lowerCamelCase )[0]
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
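# Illustrative aside (not part of the test file): the `rope_scaling` dict that the
# parameterized test above exercises is the same format a user would set on a config, e.g.
#   config = GPTNeoXConfig(rope_scaling={'type': 'linear', 'factor': 10.0})
# 'linear' interpolates position ids by dividing them by the factor, while 'dynamic'
# only rescales once an input exceeds the original `max_position_embeddings`.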
| 502 | 0 |
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
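# A minimal sanity check for the fixed function above (the asserted values follow
# directly from the algorithm; the function name is the one used in this rewrite):
if __name__ == '__main__':
    assert max_product_subarray([2, 3, -2, 4]) == 6  # best subarray is [2, 3]
    assert max_product_subarray([-2, 0, -1]) == 0
    assert max_product_subarray([-2, -3, 4]) == 24  # (-2) * (-3) * 4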
| 700 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
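# Illustrative note (not part of the module): with this lazy-module pattern, importing
# the package itself is cheap; the heavy torch/flax submodules are only loaded when one
# of the names registered in `_import_structure` is first accessed, e.g.
#   from transformers.models.gpt_neo import GPTNeoConfig  # resolves lazily via _LazyModule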
| 311 | 0 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # silence TensorFlow's C++ logging before it is imported below
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None) | 90 | """simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(
    magnitude: float, angle: float, radian_mode: bool = False
) -> list[float]:
    """Resolve a force given in polar form into its [x, y] components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
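# Quick sanity sketch for polar_force: a force of magnitude 10 at 90 degrees should
# be (almost) purely vertical; we allow floating-point slack on the x component.
_fx, _fy = polar_force(10, 90)
assert abs(_fx) < 1e-9 and abs(_fy - 10) < 1e-9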
def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether a system of forces produces (approximately) zero net moment."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 530 | 0 |
"""simple docstring"""
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'''Unsupported activation function: {act_fn}''' )
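# A short usage sketch of the helper above (function name as fixed in this file;
# nn.Mish requires a reasonably recent PyTorch release):
act = get_activation("gelu")
assert isinstance(act, nn.GELU)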
| 63 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
        """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
        """content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["""copies"""], 2)
        self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""], False)
| 63 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_falcon'''] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = '''glpn'''
    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
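# A brief usage sketch (class name as fixed above): with the defaults, the config
# reproduces the per-stage widths of the GLPN encoder, e.g.
#   config = GLPNConfig()
#   config.hidden_sizes       ->  [32, 64, 160, 256]
#   config.num_encoder_blocks ->  4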
| 4 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ ( unittest.TestCase ):
def _lowercase ( self: Optional[Any]):
'''simple docstring'''
self.tmpdirname = tempfile.mkdtemp()
image_processor = BlipImageProcessor()
tokenizer = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""")
processor = BlipProcessor(image_processor, tokenizer)
processor.save_pretrained(self.tmpdirname)
def _lowercase ( self: Optional[Any], **_lowercase: str):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname, **_lowercase).tokenizer
def _lowercase ( self: Dict, **_lowercase: Optional[int]):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname, **_lowercase).image_processor
def _lowercase ( self: Dict):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _lowercase ( self: Union[str, Any]):
'''simple docstring'''
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def _lowercase ( self: Optional[Any]):
'''simple docstring'''
__lowerCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""")
__lowerCAmelCase = self.get_image_processor(do_normalize=_lowercase, padding_value=1.0)
__lowerCAmelCase = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=_lowercase, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, _lowercase)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, _lowercase)
def _lowercase ( self: str):
'''simple docstring'''
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = BlipProcessor(tokenizer=_lowercase, image_processor=_lowercase)
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(_lowercase, return_tensors="""np""")
__lowerCAmelCase = processor(images=_lowercase, return_tensors="""np""")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def _lowercase ( self: Union[str, Any]):
'''simple docstring'''
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = BlipProcessor(tokenizer=_lowercase, image_processor=_lowercase)
__lowerCAmelCase = """lower newer"""
__lowerCAmelCase = processor(text=_lowercase)
__lowerCAmelCase = tokenizer(_lowercase, return_token_type_ids=_lowercase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def _lowercase ( self: List[Any]):
'''simple docstring'''
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = BlipProcessor(tokenizer=_lowercase, image_processor=_lowercase)
__lowerCAmelCase = """lower newer"""
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=_lowercase, images=_lowercase)
self.assertListEqual(list(inputs.keys()), ["""pixel_values""", """input_ids""", """attention_mask"""])
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def _lowercase ( self: Tuple):
'''simple docstring'''
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = BlipProcessor(tokenizer=_lowercase, image_processor=_lowercase)
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(_lowercase)
__lowerCAmelCase = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase, _lowercase)
def _lowercase ( self: str):
'''simple docstring'''
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = BlipProcessor(tokenizer=_lowercase, image_processor=_lowercase)
__lowerCAmelCase = """lower newer"""
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=_lowercase, images=_lowercase)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()), ["""pixel_values""", """input_ids""", """attention_mask"""])
| 334 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
def __init__( self: Union[str, Any], _lowercase: str, _lowercase: Optional[Any]=12, _lowercase: Tuple=7, _lowercase: Union[str, Any]=True, _lowercase: Dict=True, _lowercase: List[Any]=True, _lowercase: int=99, _lowercase: List[str]=32, _lowercase: Dict=32, _lowercase: Optional[Any]=2, _lowercase: Optional[Any]=4, _lowercase: List[str]=37, _lowercase: Any=0.1, _lowercase: List[Any]=0.1, _lowercase: List[str]=512, _lowercase: Optional[int]=0.02, _lowercase: Dict=0, _lowercase: str=None, ):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = projection_dim
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = bos_token_id
def _lowercase ( self: Dict):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
    input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
    input_mask = input_mask.numpy()
    batch_size, seq_length = input_mask.shape
    rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
    for batch_idx, start_index in enumerate(rnd_start_indices):
        input_mask[batch_idx, :start_index] = 1
        input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, tf.convert_to_tensor(input_mask)
def _lowercase ( self: Dict):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )
def _lowercase ( self: Dict, _lowercase: Union[str, Any], _lowercase: int, _lowercase: List[str]):
'''simple docstring'''
__lowerCAmelCase = TFBlipTextModel(config=_lowercase)
__lowerCAmelCase = model(_lowercase, attention_mask=_lowercase, training=_lowercase)
__lowerCAmelCase = model(_lowercase, training=_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def _lowercase ( self: Optional[int]):
'''simple docstring'''
config, input_ids, input_mask = self.prepare_config_and_inputs()
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class BlipTextModelTest( TFModelTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
def _lowercase ( self: Tuple):
'''simple docstring'''
__lowerCAmelCase = BlipTextModelTester(self)
__lowerCAmelCase = ConfigTester(self, config_class=_lowercase, hidden_size=37)
def _lowercase ( self: Optional[Any]):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: str):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase)
def _lowercase ( self: Any):
'''simple docstring'''
pass
def _lowercase ( self: Tuple):
'''simple docstring'''
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""")
def _lowercase ( self: str):
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""")
def _lowercase ( self: Dict):
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""")
def _lowercase ( self: Optional[Any]):
'''simple docstring'''
pass
@slow
def _lowercase ( self: str):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFBlipTextModel.from_pretrained(_lowercase)
self.assertIsNotNone(_lowercase)
def _lowercase ( self: Optional[int], _lowercase: Dict=True):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=_lowercase)
| 334 | 1 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
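# A tiny illustration of the mapping above (the function expects a *full* HF UNet
# state dict; these are just two of the renames it performs, derived from the
# conversion tables built earlier in this script):
#   'down_blocks.0.resnets.0.conv1.weight' -> 'input_blocks.1.0.in_layers.2.weight'
#   'time_embedding.linear_1.weight'       -> 'time_embed.0.weight'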
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
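# Shape illustration for the helper above: SD stores the VAE attention projections
# as 1x1 convs, so a (320, 320) linear weight becomes (320, 320, 1, 1).
_w = torch.zeros(320, 320)
assert reshape_weight_for_sd(_w).shape == (320, 320, 1, 1)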
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
A__ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
    vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
    text_enc_path = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
    # Load each model from safetensors if present, otherwise fall back to the PyTorch .bin files
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="""cpu""")
    else:
        unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
        unet_state_dict = torch.load(unet_path, map_location="""cpu""")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="""cpu""")
    else:
        vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
        vae_state_dict = torch.load(vae_path, map_location="""cpu""")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="""cpu""")
    else:
        text_enc_path = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
        text_enc_dict = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify a v2.0 model: its text encoder (OpenCLIP) is deeper
    is_v20_model = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"""state_dict""": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 233 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' ,['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' ,['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' ,[None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'''
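# Worked example of the URL shape checked above (values are illustrative):
# hf_hub_url(repo_id='org-name/dataset-name', path='filename with blanks.csv', revision=None)
#   -> 'https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv'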
| 233 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 702 |
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        digits = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            digits += 1
        if digits == n:
            break
    return index
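# Quick sanity sketch for the fixed function above: the first Fibonacci number
# with three digits is 144, which sits at index 12 of the sequence.
assert solution(3) == 12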
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 155 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def lowercase_ ( self ) -> Any:
'''simple docstring'''
super().setUp()
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
self.special_tokens_map = {'unk_token': '<unk>'}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
    fp.write(json.dumps(vocab_tokens) + '\n')
with open(self.merges_file, 'w', encoding='utf-8') as fp:
    fp.write('\n'.join(merges))
def lowercase_ ( self , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowercase_ ( self , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCamelCase = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCamelCase = tokenizer(lowerCamelCase__ , max_length=len(lowerCamelCase__ ) , padding=lowerCamelCase__ , return_tensors='pt' )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that special tokens are reset
@require_torch
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCamelCase = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , lowerCamelCase__ )
self.assertIn('attention_mask' , lowerCamelCase__ )
self.assertNotIn('labels' , lowerCamelCase__ )
self.assertNotIn('decoder_attention_mask' , lowerCamelCase__ )
@require_torch
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCamelCase = tokenizer(text_target=lowerCamelCase__ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCamelCase = tokenizer(
['I am a small frog' * 1_024, 'I am a small frog'] , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='pt' )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 1_024) )
@require_torch
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = ['A long paragraph for summarization.']
__lowerCamelCase = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCamelCase = tokenizer(lowerCamelCase__ , text_target=lowerCamelCase__ , return_tensors='pt' )
__lowerCamelCase = inputs['input_ids']
__lowerCamelCase = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowercase_ ( self ) -> int:
'''simple docstring'''
pass
def lowercase_ ( self ) -> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__lowerCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__lowerCamelCase = 'A, <mask> AllenNLP sentence.'
__lowerCamelCase = tokenizer_r.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
__lowerCamelCase = tokenizer_p.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
__lowerCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
__lowerCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCamelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 469 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
    model_input_names = ['''pixel_values''']
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 255 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
__lowerCamelCase = size if size is not None else {'shortest_edge': 256}
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
__lowerCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__lowerCamelCase = get_size_dict(lowerCamelCase__ , param_name='crop_size' )
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = resample
__lowerCamelCase = do_center_crop
__lowerCamelCase = crop_size
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowerCamelCase = get_resize_output_image_size(lowerCamelCase__ , size=size['shortest_edge'] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCamelCase__ , size=(size['height'], size['width']) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase = size if size is not None else self.size
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
__lowerCamelCase = resample if resample is not None else self.resample
__lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase = get_size_dict(lowerCamelCase__ , param_name='crop_size' )
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase = image_std if image_std is not None else self.image_std
__lowerCamelCase = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
__lowerCamelCase = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
__lowerCamelCase = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
__lowerCamelCase = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
__lowerCamelCase = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowerCamelCase__ ):
__lowerCamelCase = target_sizes.numpy()
__lowerCamelCase = []
for idx in range(len(lowerCamelCase__ ) ):
__lowerCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowerCamelCase__ )
__lowerCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
__lowerCamelCase = logits.argmax(dim=1 )
__lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 469 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
def snake_case__ ( self : Union[str, Any] , **lowercase : str ) -> Optional[int]:
"""simple docstring"""
config = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**lowercase )
return config
def snake_case__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def snake_case__ ( self : int ) -> List[Any]:
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps )
generator = torch.manual_seed(0 )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for i, t in enumerate(scheduler.timesteps ):
    sample = scheduler.scale_model_input(sample , t )
    model_output = model(sample , t )
    output = scheduler.step(model_output , t , sample , generator=generator )
    sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type="""v_prediction""" )
__lowercase = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(lowercase )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowercase = sample.to(lowercase )
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase , use_karras_sigmas=lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowercase = sample.to(lowercase )
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
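# The checks above compare deterministic sampling results against reference statistics.
# With a local diffusers checkout they can be run via pytest (path shown is illustrative):
#   python -m pytest tests/schedulers/test_scheduler_euler.py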
| 715 |
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
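# Usage sketch -- the expected outputs follow directly from the conversion table above:
# energy_conversion("joule", "kilojoule", 1000)   # -> 1.0
# energy_conversion("kilowatthour", "joule", 1)   # -> 3600000.0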
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 0 |
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 292 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively search for ``key`` from both ends of ``list_data`` at once."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
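# Example calls implied by the logic above:
# search([1, 2, 4, 8], 4)   # -> 2 (index of the key)
# search([1, 2, 4, 8], 5)   # -> -1 (key absent)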
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    # every public method of a plain dict should also exist on HashMap
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 382 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""MobileViTFeatureExtractor"""]
a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 382 | 1 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 91 |
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    return np.maximum(0, vector)
if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 352 | 0 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    33.3\n\n"
_CITATION = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# NOTE: the model-specific class name is not recoverable from this snippet; a generic name is used.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Convert raw model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
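    # Minimal usage sketch (assumes a matching semantic segmentation model; names are illustrative):
    #   processor = ImageProcessor()
    #   inputs = processor(images=[pil_image], return_tensors="pt")
    #   seg = processor.post_process_semantic_segmentation(model(**inputs), target_sizes=[(512, 512)])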
| 469 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 469 | 1 |
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 709 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
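# Illustrative benchmark usage built on the helpers above (the feature spec and path are assumptions):
#   features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
#   timed_iter = get_duration(lambda: sum(1 for _ in dataset))
#   print(timed_iter())  # seconds taken to iterate once over the dataset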
| 29 | 0 |
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")
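# For example, decrypt("HAL") prints "Decryption using Key #1: GZK" among its 26 lines;
# one of the 26 printed candidates is always the original plaintext.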
def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 2 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(self, text: str = None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS, r'\n        min_length_for_response (`int`, *optional*, defaults to 32):\n            The minimum length (in number of tokens) for a response.\n        minimum_tokens (`int`, *optional*, defaults to 10):\n            The minimum length of tokens to leave for a response.\n    ', )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
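# Minimal usage sketch (the model name is illustrative):
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("Hi there, how are you?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])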
| 579 | 0 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
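# Illustrative call (repo and file names are placeholders):
# hf_hub_url("username/my_dataset", "data/train.csv", revision="main")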
| 407 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 407 | 1 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
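# Counterpart to the save function above: restores model weights from the
# matching checkpoint layout for the configured FSDP state-dict type.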
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
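# Gathers the optimizer state via FSDP.optim_state_dict and saves it either as
# a single file on rank 0 (FULL_STATE_DICT) or as a distributed-checkpoint
# directory for the sharded/local layouts.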
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
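# Counterpart to the optimizer save above: loads the (possibly sharded)
# optimizer state and maps it back onto the FSDP-wrapped model's optimizer.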
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # The check below should work, but it currently doesn't (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(), optimizer_key="optimizer", storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
| 50 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """simple docstring"""

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest( FlaxModelTesterMixin , unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_use_cache_forward( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )

    def test_use_cache_forward_with_attn_mask( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large" , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )

    @slow
    def test_pegasus_xsum_summary( self ):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text , return_tensors="np" , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
        assert tgt_text == decoded
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 351 | 0 |
"""simple docstring"""
def solution( n : int = 4_000_000 ) -> int:
    '''simple docstring'''
    # Build the Fibonacci sequence up to n, then sum the even-valued terms.
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
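# Consecutive even Fibonacci numbers satisfy E(k) = 4 * E(k-1) + E(k-2)
# (2, 8, 34, 144, ...), so the even terms can also be generated directly
# without materializing the whole sequence -- a minimal alternative sketch:
def solution_even_terms_only(n: int = 4_000_000) -> int:
    total, current, following = 0, 2, 8  # 2 and 8 are the first two even terms
    while current <= n:
        total += current
        current, following = following, 4 * following + current
    return total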
if __name__ == "__main__":
print(F"{solution() = }")
| 289 |
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    def __init__( self , scheduler , optimizers , step_with_optimizer : bool = True , split_batches : bool = False ):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step( self , *args , **kwargs ):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , "total_steps" ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )

    def get_last_lr( self ):
        return self.scheduler.get_last_lr()

    def state_dict( self ):
        return self.scheduler.state_dict()

    def load_state_dict( self , state_dict ):
        self.scheduler.load_state_dict(state_dict )

    def get_lr( self ):
        return self.scheduler.get_lr()

    def print_lr( self , *args , **kwargs ):
        return self.scheduler.print_lr(*args , **kwargs )
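# A minimal usage sketch (hypothetical objects; in practice `Accelerator.prepare`
# wraps learning-rate schedulers in this class automatically):
#
#     scheduler = AcceleratedScheduler(lr_scheduler, optimizers=optimizer)
#     scheduler.step()  # only advances once the optimizer actually stepped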
| 289 | 1 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool( v ):
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def make_choice_type_function( choices : list ) -> Callable[[str], Any]:
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg( *,
    aliases : Union[str, List[str]] = None , help : str = None , default : Any = dataclasses.MISSING , default_factory : Callable[[], Any] = dataclasses.MISSING , metadata : dict = None , **kwargs , ):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
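# A minimal usage sketch of the field helper above (hypothetical dataclass):
#
#     @dataclasses.dataclass
#     class ExampleArguments:
#         learning_rate: float = HfArg(default=5e-5, aliases=["--lr"], help="Peak learning rate.")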
class HfArgumentParser( ArgumentParser ):
    dataclass_types: Iterable[DataClassType]

    def __init__( self , dataclass_types : Union[DataClassType, Iterable[DataClassType]] , **kwargs ):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field( parser : ArgumentParser , field : dataclasses.Field ) -> None:
        """simple docstring"""
        field_name = F'--{field.name}'
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default" )
        aliases = kwargs.pop("aliases" , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , "__origin__" , field.type )
        if origin_type is Union or (hasattr(types , "UnionType" ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    F' Problem encountered in field \'{field.name}\'.' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , "__origin__" , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , "__origin__" , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"] )
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(F'--no_{field.name}' , action="store_false" , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype : DataClassType ) -> None:
        """simple docstring"""
        if hasattr(dtype , "_argument_group_name" ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)" )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = ".".join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`." ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action="append" )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip("-" ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
            return (*outputs,)
    def parse_dict( self , args : Dict[str, Any] , allow_extra_keys : bool = False ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}' )
        return tuple(outputs )
    def parse_json_file( self , json_file : str , allow_extra_keys : bool = False ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        with open(Path(json_file ) , encoding="utf-8" ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file : str , allow_extra_keys : bool = False ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
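# A minimal end-to-end usage sketch (hypothetical dataclass and argv):
#
#     @dataclasses.dataclass
#     class ExampleArguments:
#         epochs: int = 3
#         debug: bool = False
#
#     parser = HfArgumentParser(ExampleArguments)
#     (example_args,) = parser.parse_args_into_dataclasses(args=["--epochs", "5", "--debug", "true"])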
| 439 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig( PretrainedConfig ):
    model_type = "bert-generation"

    def __init__( self , vocab_size=50358 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 439 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 493 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
            '''<unk>''',
            '''[CLS]''',
            '''[SEP]''',
            '''want''',
            '''unwanted''',
            '''wa''',
            '''un''',
            '''running''',
            ''',''',
            '''low''',
            '''l''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        kwargs['''lower_case'''] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''<unk> UNwanted , running'''
        output_text = '''<unk> unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('''<unk> UNwanted , running''' )
        self.assertListEqual(tokens , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
    def test_full_tokenizer_lower( self ):
        tokenizer = TransfoXLTokenizer(lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
    def test_full_tokenizer_no_lower( self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_full_tokenizer_moses( self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
    def test_move_added_token( self ):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
        tokenizer.add_tokens(['''new1''', '''new2'''] )
        tokenizer.move_added_token('''new1''' , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 493 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig( PretrainedConfig ):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__( self , vocab_size=65024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed" , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def head_dim( self ):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary( self ):
        return not self.alibi
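# Note: per the properties above, rotary position embeddings are used exactly
# when ALiBi is disabled, and each attention head spans
# hidden_size // num_attention_heads dimensions.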
| 328 |
from __future__ import annotations
def depth_first_search( graph : dict , start : str ) -> set[str]:
    """simple docstring"""
    explored , stack = set() , [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
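# An equivalent recursive formulation, shown as a minimal sketch; the iterative
# version above is usually preferred in Python since deep graphs can exceed the
# interpreter's recursion limit:
def depth_first_search_recursive(graph: dict, vertex: str, explored: set | None = None) -> set[str]:
    explored = set() if explored is None else explored
    explored.add(vertex)
    for adj in graph[vertex]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored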
__lowerCamelCase = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 328 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__( self , vocab_size=30145 , emb_dim=2048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2048**-0.5 , layer_norm_eps=1e-12 , init_std=0.02 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs['n_words']
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig( OnnxConfig ):
    """simple docstring"""

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 53 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    'features', [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ], )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : int, lowerCAmelCase_ : Tuple=("train",) ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    'features', [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ], )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
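# Note: `tmp_path` is pytest's built-in temporary-directory fixture, while
# `text_path` is assumed to be a suite-level fixture (defined in the shared
# conftest) pointing at a four-line text file, matching the num_rows == 4 checks.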
| 53 | 1 |
def add( first : int , second : int ) -> int:
    '''simple docstring'''
    # XOR adds without carries; AND finds the carry bits, which are shifted
    # left and folded back in until no carry remains.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
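# With Python's unbounded integers the carry loop above only terminates when
# the carry eventually vanishes (e.g. for non-negative inputs). A common
# variant, shown as a minimal sketch, masks to a fixed 32-bit width so
# mixed-sign inputs also terminate:
def add_32bit(first: int, second: int) -> int:
    mask = 0xFFFFFFFF
    first &= mask
    second &= mask
    while second:
        first, second = (first ^ second) & mask, ((first & second) << 1) & mask
    # reinterpret the 32-bit pattern as a signed integer
    return first if first <= 0x7FFFFFFF else first - 0x100000000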
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Optional[Any] = int(input("Enter the first number: ").strip())
lowerCamelCase : List[str] = int(input("Enter the second number: ").strip())
print(F"""{add(first, second) = }""")
| 720 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(args):
    '''simple docstring'''
    return EnvironmentCommand()


class EnvironmentCommand( BaseDiffusersCLICommand ):
    '''simple docstring'''

    @staticmethod
    def register_subcommand( parser : ArgumentParser ):
        """simple docstring"""
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=info_command_factory )

    def run( self ):
        """simple docstring"""
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info ) )
        return info

    @staticmethod
    def format_dict( d ):
        """simple docstring"""
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 651 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        return "lower newer", "lower newer"
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_padding( self , max_length=15 ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='''max_length''' , )
def __snake_case ( self):
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __A ( lowerCamelCase__ ):
"""simple docstring"""
pass
| 114 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = tempfile.mkdtemp()
_lowerCamelCase : List[str] = BlipImageProcessor()
_lowerCamelCase : Union[str, Any] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
_lowerCamelCase : Any = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
_lowerCamelCase : Any = InstructBlipProcessor(a__ , a__ , a__)
processor.save_pretrained(self.tmpdirname)
def __snake_case ( self , **a__):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a__).tokenizer
def __snake_case ( self , **a__):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a__).image_processor
def __snake_case ( self , **a__):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a__).qformer_tokenizer
def __snake_case ( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
_lowerCamelCase : List[str] = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
return image_inputs
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
_lowerCamelCase : str = self.get_image_processor(do_normalize=a__ , padding_value=1.0)
_lowerCamelCase : Tuple = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=a__ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , a__)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , a__)
self.assertIsInstance(processor.qformer_tokenizer , a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : Optional[int] = self.get_tokenizer()
_lowerCamelCase : Any = self.get_qformer_tokenizer()
_lowerCamelCase : Dict = InstructBlipProcessor(
tokenizer=a__ , image_processor=a__ , qformer_tokenizer=a__)
_lowerCamelCase : List[Any] = self.prepare_image_inputs()
_lowerCamelCase : List[str] = image_processor(a__ , return_tensors='''np''')
_lowerCamelCase : List[Any] = processor(images=a__ , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Any = self.get_image_processor()
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : int = self.get_qformer_tokenizer()
_lowerCamelCase : Tuple = InstructBlipProcessor(
tokenizer=a__ , image_processor=a__ , qformer_tokenizer=a__)
_lowerCamelCase : List[Any] = '''lower newer'''
_lowerCamelCase : Any = processor(text=a__)
_lowerCamelCase : Optional[int] = tokenizer(a__ , return_token_type_ids=a__)
_lowerCamelCase : Optional[Any] = qformer_tokenizer(a__ , return_token_type_ids=a__)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : Optional[int] = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_qformer_tokenizer()
_lowerCamelCase : List[Any] = InstructBlipProcessor(
tokenizer=a__ , image_processor=a__ , qformer_tokenizer=a__)
_lowerCamelCase : List[Any] = '''lower newer'''
_lowerCamelCase : Tuple = self.prepare_image_inputs()
_lowerCamelCase : Tuple = processor(text=a__ , images=a__)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(a__):
processor()
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : List[Any] = self.get_qformer_tokenizer()
_lowerCamelCase : Optional[Any] = InstructBlipProcessor(
tokenizer=a__ , image_processor=a__ , qformer_tokenizer=a__)
_lowerCamelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Tuple = processor.batch_decode(a__)
_lowerCamelCase : str = tokenizer.batch_decode(a__)
self.assertListEqual(a__ , a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : str = self.get_qformer_tokenizer()
_lowerCamelCase : str = InstructBlipProcessor(
tokenizer=a__ , image_processor=a__ , qformer_tokenizer=a__)
_lowerCamelCase : str = '''lower newer'''
_lowerCamelCase : str = self.prepare_image_inputs()
_lowerCamelCase : List[Any] = processor(text=a__ , images=a__)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 114 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase_ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 3_2 , snake_case_ = 6_4 , snake_case_ = 2_0 , snake_case_ = 7_6_8 , snake_case_=7_7 , snake_case_=4 , snake_case_ = 0.0 , snake_case_ = "silu" , snake_case_ = None , snake_case_ = None , snake_case_ = "linear" , snake_case_ = "prd" , snake_case_ = None , snake_case_ = None , snake_case_ = None , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Dict = attention_head_dim
UpperCAmelCase_ : int = num_attention_heads * attention_head_dim
UpperCAmelCase_ : str = additional_embeddings
UpperCAmelCase_ : List[Any] = time_embed_dim or inner_dim
UpperCAmelCase_ : Tuple = embedding_proj_dim or embedding_dim
UpperCAmelCase_ : Union[str, Any] = clip_embed_dim or embedding_dim
UpperCAmelCase_ : Tuple = Timesteps(snake_case_ , snake_case_ , 0 )
UpperCAmelCase_ : Tuple = TimestepEmbedding(snake_case_ , snake_case_ , out_dim=snake_case_ , act_fn=snake_case_ )
UpperCAmelCase_ : Union[str, Any] = nn.Linear(snake_case_ , snake_case_ )
if embedding_proj_norm_type is None:
UpperCAmelCase_ : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ : Dict = nn.LayerNorm(snake_case_ )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ : Tuple = nn.Linear(snake_case_ , snake_case_ )
if encoder_hid_proj_type is None:
UpperCAmelCase_ : List[Any] = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ : Tuple = nn.Linear(snake_case_ , snake_case_ )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ : Dict = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , snake_case_ ) )
if added_emb_type == "prd":
UpperCAmelCase_ : Tuple = nn.Parameter(torch.zeros(1 , 1 , snake_case_ ) )
elif added_emb_type is None:
UpperCAmelCase_ : str = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ : Dict = nn.ModuleList(
[
BasicTransformerBlock(
snake_case_ , snake_case_ , snake_case_ , dropout=snake_case_ , activation_fn='gelu' , attention_bias=snake_case_ , )
for d in range(snake_case_ )
] )
if norm_in_type == "layer":
UpperCAmelCase_ : int = nn.LayerNorm(snake_case_ )
elif norm_in_type is None:
UpperCAmelCase_ : List[str] = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ : int = nn.LayerNorm(snake_case_ )
UpperCAmelCase_ : int = nn.Linear(snake_case_ , snake_case_ )
UpperCAmelCase_ : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , snake_case_ , persistent=snake_case_ )
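# The buffer registered above is an additive attention mask: triu_(1) leaves
# -10000.0 strictly above the diagonal, so once it is added to the attention
# scores each position is prevented from attending to later (future) positions.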
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.zeros(1 , snake_case_ ) )
UpperCAmelCase_ : Any = nn.Parameter(torch.zeros(1 , snake_case_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : str = {}
def fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ ):
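# Walks the module tree depth-first and records every submodule that exposes
# `set_processor`, keyed by its dotted path (e.g. "transformer_blocks.0.attn1.processor";
# the exact key shown here is illustrative).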
if hasattr(snake_case_ , 'set_processor' ):
UpperCAmelCase_ : Any = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , snake_case_ , snake_case_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ )
return processors
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = len(self.attn_processors.keys() )
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(snake_case_ )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ ):
if hasattr(snake_case_ , 'set_processor' ):
if not isinstance(snake_case_ , snake_case_ ):
module.set_processor(snake_case_ )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , snake_case_ , snake_case_ )
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = True , ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = hidden_states.shape[0]
UpperCAmelCase_ : Any = timestep
if not torch.is_tensor(snake_case_ ):
UpperCAmelCase_ : Any = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(snake_case_ ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ : Union[str, Any] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ : Optional[Any] = timesteps * torch.ones(snake_case_ , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ : List[str] = self.time_proj(snake_case_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ : List[str] = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ : List[Any] = self.time_embedding(snake_case_ )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ : Union[str, Any] = self.embedding_proj_norm(snake_case_ )
UpperCAmelCase_ : Tuple = self.embedding_proj(snake_case_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ : Tuple = self.encoder_hidden_states_proj(snake_case_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
UpperCAmelCase_ : Optional[int] = self.proj_in(snake_case_ )
UpperCAmelCase_ : Tuple = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Tuple = 0
if encoder_hidden_states is not None:
additional_embeds.append(snake_case_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ : Dict = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ : str = hidden_states[:, None, :]
UpperCAmelCase_ : Optional[Any] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ : Dict = self.prd_embedding.to(hidden_states.dtype ).expand(snake_case_ , -1 , -1 )
additional_embeds.append(snake_case_ )
UpperCAmelCase_ : Union[str, Any] = torch.cat(
snake_case_ , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ : Union[str, Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ : Optional[int] = F.pad(
snake_case_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ : List[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
UpperCAmelCase_ : Dict = F.pad(snake_case_ , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ : List[str] = self.norm_in(snake_case_ )
for block in self.transformer_blocks:
UpperCAmelCase_ : List[str] = block(snake_case_ , attention_mask=snake_case_ )
UpperCAmelCase_ : Dict = self.norm_out(snake_case_ )
if self.prd_embedding is not None:
UpperCAmelCase_ : Optional[Any] = hidden_states[:, -1]
else:
UpperCAmelCase_ : List[Any] = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ : List[Any] = self.proj_to_clip_embeddings(snake_case_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=snake_case_ )
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Any = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 389 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Any = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('xlm-roberta-base' )
UpperCAmelCase_ : Tuple = 'The dog is cute and lives in the garden house'
UpperCAmelCase_ : Dict = jnp.array([tokenizer.encode(snake_case_ )] )
UpperCAmelCase_ : str = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ : List[str] = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
UpperCAmelCase_ : str = model(snake_case_ )['last_hidden_state']
self.assertEqual(output.shape , snake_case_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case_ , atol=1E-3 ) )
| 389 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
A_ : str = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
A_ : Optional[Any] = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
A_ : List[str] = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def __snake_case ( __A : Union[str, Any] , __A : List[str] ) -> Any:
'''simple docstring'''
return float((preds == labels).mean() )
def __snake_case ( __A : Union[str, Any] , __A : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = simple_accuracy(__A , __A )
SCREAMING_SNAKE_CASE : Optional[Any] = float(fa_score(y_true=__A , y_pred=__A ) )
return {
"accuracy": acc,
"f1": fa,
}
def __snake_case ( __A : List[Any] , __A : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = float(pearsonr(__A , __A )[0] )
SCREAMING_SNAKE_CASE : Tuple = float(spearmanr(__A , __A )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
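# A quick sanity check of the helper functions above (illustrative, not part of
# the original module): simple accuracy is just the mean of exact matches.
import numpy as np

_preds = np.array([0, 1, 1, 0])
_labels = np.array([0, 1, 0, 0])
assert float((_preds == _labels).mean()) == 0.75  # 3 of the 4 predictions match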
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def _lowerCAmelCase ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
elif self.config_name == "stsb":
return pearson_and_spearman(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 265 |
"""simple docstring"""
from __future__ import annotations
def __snake_case ( __A : list[int] ) -> list[int]:
'''simple docstring'''
if len(__A ) == 0:
return array
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = min(__A ), max(__A )
# Compute the variables
SCREAMING_SNAKE_CASE : Optional[int] = _max - _min + 1
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
SCREAMING_SNAKE_CASE : List[Any] = i - _min
SCREAMING_SNAKE_CASE : Dict = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
SCREAMING_SNAKE_CASE : int = 0
for i in range(__A ):
while holes_repeat[i] > 0:
SCREAMING_SNAKE_CASE : int = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[Any] = input('Enter numbers separated by comma:\n')
A_ : Union[str, Any] = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
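# A runnable reference for pigeonhole sort, added here because the placeholder
# renaming above obscures which binding holds what (names below are illustrative,
# the algorithm is the same):
def pigeonhole_sort(array: list[int]) -> list[int]:
    if not array:
        return array
    lo, hi = min(array), max(array)
    holes = [0] * (hi - lo + 1)  # one counting hole per possible value
    for value in array:
        holes[value - lo] += 1  # count occurrences of each value
    index = 0
    for offset, count in enumerate(holes):
        for _ in range(count):
            array[index] = offset + lo  # write values back in sorted order
            index += 1
    return array

# pigeonhole_sort([8, 3, 2, 7, 4, 6, 8]) -> [2, 3, 4, 6, 7, 8, 8]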
| 265 | 1 |
from __future__ import annotations
def lowerCAmelCase ( UpperCamelCase__ : tuple[int, int] , UpperCamelCase__ : int ) -> list[tuple[int, int]]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: int = position
__SCREAMING_SNAKE_CASE: Optional[int] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
__SCREAMING_SNAKE_CASE: Tuple = []
for position in positions:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Dict = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(UpperCamelCase__ )
return permissible_positions
def lowerCAmelCase ( UpperCamelCase__ : list[list[int]] ) -> bool:
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def lowerCAmelCase ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : tuple[int, int] , UpperCamelCase__ : int ) -> bool:
"""simple docstring"""
if is_complete(UpperCamelCase__ ):
return True
for position in get_valid_pos(UpperCamelCase__ , len(UpperCamelCase__ ) ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Tuple = position
if board[y][x] == 0:
__SCREAMING_SNAKE_CASE: Union[str, Any] = curr + 1
if open_knight_tour_helper(UpperCamelCase__ , UpperCamelCase__ , curr + 1 ):
return True
__SCREAMING_SNAKE_CASE: List[Any] = 0
return False
def lowerCAmelCase ( UpperCamelCase__ : int ) -> list[list[int]]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[str] = [[0 for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE: Dict = 1
if open_knight_tour_helper(UpperCamelCase__ , (i, j) , 1 ):
return board
__SCREAMING_SNAKE_CASE: Union[str, Any] = 0
__SCREAMING_SNAKE_CASE: Optional[int] = F"""Open Knight Tour cannot be performed on a board of size {n}"""
raise ValueError(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
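# A minimal, self-contained reference for the backtracking open knight's tour
# above (illustrative names, independent of the placeholder-named functions in
# this snippet; it tracks the number of placed squares instead of rescanning
# the board for completeness):
def knight_moves(y: int, x: int, n: int) -> list[tuple[int, int]]:
    deltas = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    return [(y + dy, x + dx) for dy, dx in deltas if 0 <= y + dy < n and 0 <= x + dx < n]

def solve(board: list[list[int]], pos: tuple[int, int], step: int) -> bool:
    n = len(board)
    if step == n * n:  # every square visited exactly once
        return True
    for ny, nx in knight_moves(*pos, n):
        if board[ny][nx] == 0:
            board[ny][nx] = step + 1  # tentatively place the next move
            if solve(board, (ny, nx), step + 1):
                return True
            board[ny][nx] = 0  # backtrack
    return False

# board = [[0] * 5 for _ in range(5)]; board[0][0] = 1; solve(board, (0, 0), 1)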
| 146 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCAmelCase : Optional[int] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCAmelCase : List[Any] = """main"""
# Default branch name
lowerCAmelCase : int = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
lowerCAmelCase : str = """aaaaaaa"""
# This commit does not exist, so we should 404.
lowerCAmelCase : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCAmelCase : Union[str, Any] = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
class a ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class a ( unittest.TestCase ):
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
@require_torch
def snake_case_ ( self ):
"""simple docstring"""
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''start_positions''', '''end_positions'''] )
class a ( __lowercase ):
pass
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels'''] )
@require_tf
def snake_case_ ( self ):
"""simple docstring"""
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''start_positions''', '''end_positions'''] )
class a ( __lowercase ):
pass
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels'''] )
@require_flax
def snake_case_ ( self ):
"""simple docstring"""
self.assertEqual(find_labels(_lowerCAmelCase ) , [] )
self.assertEqual(find_labels(_lowerCAmelCase ) , [] )
self.assertEqual(find_labels(_lowerCAmelCase ) , [] )
class a ( __lowercase ):
pass
self.assertEqual(find_labels(_lowerCAmelCase ) , [] )
| 146 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__SCREAMING_SNAKE_CASE = """src/transformers"""
__SCREAMING_SNAKE_CASE = """docs/source/en"""
__SCREAMING_SNAKE_CASE = """."""
def __a ( a, a, a ):
"""simple docstring"""
with open(a, "r", encoding="utf-8", newline="\n" ) as f:
_a = f.readlines()
# Find the start prompt.
_a = 0
while not lines[start_index].startswith(a ):
start_index += 1
start_index += 1
_a = start_index
while not lines[end_index].startswith(a ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__SCREAMING_SNAKE_CASE = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
__SCREAMING_SNAKE_CASE = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
__SCREAMING_SNAKE_CASE = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__SCREAMING_SNAKE_CASE = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE = direct_transformers_import(TRANSFORMERS_PATH)
def __a ( a ):
"""simple docstring"""
_a = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", a )
return [m.group(0 ) for m in matches]
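# For example, splitting "TFBertForQuestionAnswering" with the regex above yields
# ["TF", "Bert", "For", "Question", "Answering"]: the lookarounds break on
# lower-to-upper transitions and before an uppercase run followed by a lowercase letter.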
def __a ( a, a ):
"""simple docstring"""
_a = 2 if text == "✅" or text == "❌" else len(a )
_a = (width - text_length) // 2
_a = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def __a ( ):
"""simple docstring"""
_a = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_a = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_a = {name: config.replace("Config", "" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_a = collections.defaultdict(a )
_a = collections.defaultdict(a )
_a = collections.defaultdict(a )
_a = collections.defaultdict(a )
_a = collections.defaultdict(a )
# Let's lookup through all transformers object (once).
for attr_name in dir(a ):
_a = None
if attr_name.endswith("Tokenizer" ):
_a = slow_tokenizers
_a = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_a = fast_tokenizers
_a = attr_name[:-1_3]
elif _re_tf_models.match(a ) is not None:
_a = tf_models
_a = _re_tf_models.match(a ).groups()[0]
elif _re_flax_models.match(a ) is not None:
_a = flax_models
_a = _re_flax_models.match(a ).groups()[0]
elif _re_pt_models.match(a ) is not None:
_a = pt_models
_a = _re_pt_models.match(a ).groups()[0]
if lookup_dict is not None:
while len(a ) > 0:
if attr_name in model_name_to_prefix.values():
_a = True
break
# Try again after removing the last word in the name
_a = "".join(camel_case_split(a )[:-1] )
# Let's build that table!
_a = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_a = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_a = [len(a ) + 2 for c in columns]
_a = max([len(a ) for name in model_names] ) + 2
# Build the table per se
_a = "|" + "|".join([_center_text(a, a ) for c, w in zip(a, a )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_a = {True: "✅", False: "❌"}
for name in model_names:
_a = model_name_to_prefix[name]
_a = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(a, a ) for l, w in zip(a, a )] ) + "|\n"
return table
def __a ( a=False ):
"""simple docstring"""
_a , _a , _a , _a = _find_text_in_file(
filename=os.path.join(a, "index.md" ), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
_a = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(a, "index.md" ), "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__SCREAMING_SNAKE_CASE = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 388 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=99 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=37 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : List[Any]=5_12 , __lowerCamelCase : int=16 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : int=3 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=None , ) -> Any:
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str ) -> Optional[Any]:
a = LlamaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
a = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Dict , ) -> Tuple:
a = True
a = LlamaModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , )
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , ) -> str:
a = LlamaForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , ) -> Optional[Any]:
a = True
a = True
a = LlamaForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# first forward pass
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase , )
a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = torch.cat([input_mask, next_mask] , dim=-1 )
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )["hidden_states"][0]
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )["hidden_states"][0]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -3:, random_slice_idx].detach()
a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
a = self.prepare_config_and_inputs()
( a , a , a , a , a , a , a ) = config_and_inputs
a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (LlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
a = LlamaModelTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> str:
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(__lowerCamelCase )
a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a = LlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = "single_label_classification"
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(__lowerCamelCase )
a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a = LlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = "multi_label_classification"
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(__lowerCamelCase )
a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a = LlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def __UpperCAmelCase ( self : Dict ) -> Any:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Tuple ) -> Union[str, Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = ids_tensor([1, 10] , config.vocab_size )
a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a = LlamaModel(__lowerCamelCase )
original_model.to(__lowerCamelCase )
original_model.eval()
a = original_model(__lowerCamelCase ).last_hidden_state
a = original_model(__lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a = {"type": scaling_type, "factor": 10.0}
a = LlamaModel(__lowerCamelCase )
scaled_model.to(__lowerCamelCase )
scaled_model.eval()
a = scaled_model(__lowerCamelCase ).last_hidden_state
a = scaled_model(__lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
@require_torch
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __UpperCAmelCase ( self : Dict ) -> int:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
a = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
a = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __lowerCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
a = model(torch.tensor(__lowerCamelCase ) )
# Expected mean on dim = -1
a = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __lowerCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __UpperCAmelCase ( self : Any ) -> Tuple:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
a = model(torch.tensor(__lowerCamelCase ) )
# Expected mean on dim = -1
a = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
a = model(torch.tensor(__lowerCamelCase ) )
a = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
a = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __lowerCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Model is curently gated" )
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
a = "Simply put, the theory of relativity states that "
a = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
a = tokenizer.encode(__lowerCamelCase , return_tensors="pt" )
a = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__lowerCamelCase )
# greedy generation outputs
a = model.generate(__lowerCamelCase , max_new_tokens=64 , top_p=__lowerCamelCase , temperature=1 , do_sample=__lowerCamelCase )
a = tokenizer.decode(generated_ids[0] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
| 662 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
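# A compact, self-contained sketch of the Banker's safety check that the class
# above performs (illustrative names and structure; the placeholder identifiers
# in this file obscure the originals):
def is_safe_state(claim: list[int], allocated: list[list[int]], maximum: list[list[int]]) -> bool:
    # need = maximum claim minus what each process already holds
    need = [[m - a for m, a in zip(max_row, alloc_row)] for max_row, alloc_row in zip(maximum, allocated)]
    # available = total claim vector minus everything currently allocated
    available = [c - sum(col) for c, col in zip(claim, zip(*allocated))]
    pending = list(range(len(allocated)))
    while pending:
        for p in pending:
            if all(n <= f for n, f in zip(need[p], available)):
                # process p can finish: release everything it holds
                available = [f + a for f, a in zip(available, allocated[p])]
                pending.remove(p)
                break
        else:
            return False  # no pending process can proceed -> unsafe state
    return True

# With the module-level claim/allocation/maximum tables defined above, True
# means a safe execution order exists.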
| 662 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into (features, targets)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
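# Hedged follow-on sketch (not part of the original file): the fitted
# classifier returned by `xgboost(...)` supports the usual sklearn API; the
# local names below mirror those inside main().
#
#     clf = xgboost(x_train, y_train)
#     predictions = clf.predict(x_test)   # class indices 0..2
#     print(names[predictions[:5]])       # map indices back to species names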
| 21 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX-algorithm for minimum vertex cover."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
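# Hedged check (not part of the original file): a brute-force verifier for
# small graphs, useful to confirm the greedy result is a valid cover and to
# compare it against a true minimum cover.
from itertools import combinations


def is_vertex_cover(graph: dict, vertices: set) -> bool:
    # every edge (u, v) must have at least one endpoint inside `vertices`
    return all(u in vertices or v in vertices for u in graph for v in graph[u])


def exact_min_vertex_cover(graph: dict) -> set:
    # try covers of increasing size; the first hit is a minimum vertex cover
    for size in range(len(graph) + 1):
        for combo in combinations(graph, size):
            if is_vertex_cover(graph, set(combo)):
                return set(combo)
    return set(graph)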
| 21 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 713 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 534 | 0 |
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val when option is truthy, otherwise max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer average of two numbers."""
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for to_guess inside (lower, higher), printing each step."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value"
        )

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    """Read the bounds and the target from stdin and run the game."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
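# Hedged non-interactive example (not part of the original file): calling the
# search directly avoids the stdin prompts in main().
#
#     guess_the_number(10, 1000, 17)
#     # prints the midpoints it tries: 505, 257, 133, 71, 40, 25, 17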
| 291 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32,
            in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37,
            layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
            pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert,
            tokenizer=tokenizer, safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0,
            num_inference_steps=2, output_type="np", image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0,
            num_inference_steps=2, output_type="np", image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert,
            tokenizer=tokenizer, safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2,
            output_type="np", image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75,
            guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75,
            guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 291 | 1 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        """Called to initialize data. Use the call to construct features."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Load datasets. Called after prepare data."""
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
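# Hedged invocation sketch (not part of the original file): with the
# lightning_base helpers from transformers/examples on PYTHONPATH, a run
# could look like the line below. The script name and any flags other than
# the ones registered in add_model_specific_args above come from
# add_generic_args and are assumptions.
#
#     python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#         --model_name_or_path bert-base-cased --gpus 1 --do_train --do_predict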
| 297 |
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for bit_count bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    # base cases
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
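# Hedged usage example (not part of the original file): adjacent Gray codes
# differ in exactly one bit, which an XOR popcount check confirms.
#
#     codes = gray_code(3)   # [0, 1, 3, 2, 6, 7, 5, 4]
#     assert all(bin(a ^ b).count("1") == 1 for a, b in zip(codes, codes[1:]))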
| 297 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True,
        unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]",
        cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True,
        strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
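# Hedged usage sketch (not part of the original file): load the fast
# tokenizer from its Hub checkpoint and encode a sentence pair.
#
#     tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#     enc = tok("first sentence", "second sentence")
#     print(enc["input_ids"], enc["token_type_ids"])  # 0s for sequence A, 1s for B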
| 1 | """simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not trusted, loading this checkpoint should raise.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
        )
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")
    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 359 | 0 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 444 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"""{solution() = }""")
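# Hedged mini-demo (not part of the original file): XOR with a repeating key
# is an involution, so applying the same key twice restores the plaintext.
#
#     key = (ord("a"), ord("b"), ord("c"))
#     cipher = [c ^ k for c, k in zip(map(ord, "hello"), cycle(key))]
#     assert try_key(cipher, key) == "hello"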
| 444 | 1 |
'''simple docstring'''
import os
def solution():
    """
    Find the maximum total in a triangle read from ``triangle.txt`` by
    propagating the best partial sum down the rows.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
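# Hedged worked example (not part of the original file): the same top-down DP
# on the 3-row triangle [[3], [7, 4], [2, 4, 8]] turns the rows into partial
# maxima [10, 7] and then [12, 14, 15], so the best path total is 15 (3+4+8).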
| 24 |
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """Return 0 only when both inputs are 1, otherwise 1 (logical NAND)."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1)) | 125 | 0 |
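# Hedged extension (not part of the original file): NAND is functionally
# complete, so the remaining basic gates can be derived from nand_gate alone.
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))


def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(not_gate(input_1), not_gate(input_2))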
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
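# Hedged usage note (not part of the original file): with this lazy-module
# pattern, a symbol's submodule is imported only on first attribute access,
# e.g.
#
#     from transformers import InstructBlipProcessor   # resolves lazily
#
# which loads `processing_instructblip` behind the scenes on first use.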
| 32 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Check whether n uses each of the digits 1-9 exactly once."""
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    """Return the largest 1-9 pandigital number that is a concatenated product."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
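# Hedged worked check (not part of the original file): multiplying a 4-digit
# base by 100002 concatenates the base with its double, e.g.
#
#     9327 * 100002 == 932718654   # "9327" + "18654", i.e. 9327 x (1, 2)
#
# and 932718654 is 9-pandigital, so the first loop can find it directly.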
| 32 | 1 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)
    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
    # The common tests below are skipped for RoFormer (custom rjieba
    # pre-tokenizer); the overridden names are plausible reconstructions.
    def test_rust_and_python_full_tokenizers(self):
        pass

    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass
| 96 |
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def a ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Tuple=True ) -> Tuple:
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__magic_name__: Any = os.path.join(__UpperCAmelCase , """tmp""" )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__magic_name__: str = read_json(os.path.join(__UpperCAmelCase , """params.json""" ) )
__magic_name__: Dict = NUM_SHARDS[model_size]
__magic_name__: int = params["""n_layers"""]
__magic_name__: Optional[Any] = params["""n_heads"""]
__magic_name__: Optional[int] = n_heads // num_shards
__magic_name__: Optional[int] = params["""dim"""]
__magic_name__: Any = dim // n_heads
__magic_name__: Optional[Any] = 1_00_00.0
__magic_name__: int = 1.0 / (base ** (torch.arange(0 , __UpperCAmelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
__magic_name__: Dict = params["""n_kv_heads"""] # for GQA / MQA
__magic_name__: Optional[Any] = n_heads_per_shard // num_key_value_heads
__magic_name__: Union[str, Any] = dim // num_key_value_heads
else: # compatibility with other checkpoints
__magic_name__: List[Any] = n_heads
__magic_name__: Union[str, Any] = n_heads_per_shard
__magic_name__: Tuple = dim
# permute for sliced rotary
def permute(__UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any]=n_heads , __UpperCAmelCase : Tuple=dim , __UpperCAmelCase : Tuple=dim ):
return w.view(__UpperCAmelCase , dima // n_heads // 2 , 2 , __UpperCAmelCase ).transpose(1 , 2 ).reshape(__UpperCAmelCase , __UpperCAmelCase )
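# Shape intuition for the permutation above: per attention head, rows stored in Meta's
# interleaved rotary layout (r0, r1, r2, r3, ...) are regrouped into the half-split layout
# HF LLaMA expects (r0, r2, ..., r1, r3, ...); e.g. with head_dim = 4 the per-head row
# order (0, 1, 2, 3) becomes (0, 2, 1, 3).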
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__magic_name__: Optional[int] = torch.load(os.path.join(__UpperCAmelCase , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
__magic_name__: List[Any] = [
torch.load(os.path.join(__UpperCAmelCase , f'consolidated.{i:02d}.pth' ) , map_location="""cpu""" )
for i in range(__UpperCAmelCase )
]
__magic_name__: Dict = 0
__magic_name__: Any = {"""weight_map""": {}}
for layer_i in range(__UpperCAmelCase ):
__magic_name__: Tuple = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
__magic_name__: List[Any] = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, so saving attention_norm and ffn_norm would save the other weights too, which is
# redundant since those weights are stitched together from multiple shards. To avoid that, they are cloned.
__magic_name__: Dict = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
__magic_name__: Union[str, Any] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for i in range(__UpperCAmelCase )
] , dim=0 , ).reshape(__UpperCAmelCase , __UpperCAmelCase ) )
__magic_name__: int = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for i in range(__UpperCAmelCase )
] , dim=0 , ).reshape(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
__magic_name__: Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for i in range(__UpperCAmelCase )
] , dim=0 , ).reshape(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__: Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(__UpperCAmelCase )] , dim=1 )
__magic_name__: Optional[int] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__UpperCAmelCase )] , dim=0 )
__magic_name__: str = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__UpperCAmelCase )] , dim=1 )
__magic_name__: str = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__UpperCAmelCase )] , dim=0 )
__magic_name__: Union[str, Any] = inv_freq
for k, v in state_dict.items():
__magic_name__: Tuple = filename
param_count += v.numel()
torch.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
__magic_name__: str = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
__magic_name__: List[Any] = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
__magic_name__: int = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__UpperCAmelCase )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__UpperCAmelCase )] , dim=0 ),
}
for k, v in state_dict.items():
__magic_name__: Tuple = filename
param_count += v.numel()
torch.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
# Write configs
__magic_name__: Optional[Any] = {"""total_size""": param_count * 2}
write_json(__UpperCAmelCase , os.path.join(__UpperCAmelCase , """pytorch_model.bin.index.json""" ) )
__magic_name__: Tuple = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
__magic_name__: Union[str, Any] = params["""multiple_of"""] if """multiple_of""" in params else 2_5_6
__magic_name__: Optional[int] = LlamaConfig(
hidden_size=__UpperCAmelCase , intermediate_size=compute_intermediate_size(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=__UpperCAmelCase , )
config.save_pretrained(__UpperCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
__magic_name__: Dict = LlamaForCausalLM.from_pretrained(__UpperCAmelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=__UpperCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__UpperCAmelCase , safe_serialization=__UpperCAmelCase )
shutil.rmtree(__UpperCAmelCase )
def a ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> List[Any]:
# Initialize the tokenizer based on the `spm` model
__magic_name__: Optional[int] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
__magic_name__: Dict = tokenizer_class(__UpperCAmelCase )
tokenizer.save_pretrained(__UpperCAmelCase )
def a ( ) -> Any:
__magic_name__: List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=__UpperCAmelCase , help="""Whether or not to save using `safetensors`.""" )
__magic_name__: str = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
__magic_name__: List[Any] = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , __UpperCAmelCase )
if __name__ == "__main__":
main()
| 96 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Union[str, Any] = logging.get_logger(__name__)
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False ):
lowerCamelCase_: List[str] = """backbone.""" if is_semantic else """"""
lowerCamelCase_: Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", """beit.embeddings.cls_token"""),
(f"""{prefix}patch_embed.proj.weight""", """beit.embeddings.patch_embeddings.projection.weight"""),
(f"""{prefix}patch_embed.proj.bias""", """beit.embeddings.patch_embeddings.projection.bias"""),
(f"""{prefix}pos_embed""", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False ):
for i in range(config.num_hidden_layers ):
lowerCamelCase_: Union[str, Any] = """backbone.""" if is_semantic else """"""
# queries, keys and values
lowerCamelCase_: Any = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
lowerCamelCase_: Dict = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
lowerCamelCase_: List[str] = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
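# The checkpoint stores the query/key/value projections as one fused matrix of shape
# (3 * hidden_size, hidden_size); the slices below recover them row-wise:
# [:h] -> query, [h : 2h] -> key, [-h:] -> value. Only q_bias and v_bias are popped
# above because BEiT's key projection carries no bias.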
lowerCamelCase_: List[str] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase_: List[Any] = q_bias
lowerCamelCase_: List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_: Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase_: str = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowerCamelCase_: List[str] = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
lowerCamelCase_: Union[str, Any] = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
lowerCamelCase_: Optional[int] = gamma_a
lowerCamelCase_: Dict = gamma_a
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase_: Union[str, Any] = dct.pop(_UpperCAmelCase )
lowerCamelCase_: Dict = val
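# Minimal illustration on a toy state dict (key names taken from the rename list above):
# sd = {"blocks.0.norm1.weight": w}
# rename_key(sd, "blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")
# leaves sd == {"beit.encoder.layer.0.layernorm_before.weight": w}.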
def UpperCAmelCase_ ( ):
lowerCamelCase_: str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase_: Optional[Any] = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
lowerCamelCase_: Optional[int] = False if """rvlcdip""" in checkpoint_url else True
lowerCamelCase_: Tuple = BeitConfig(use_absolute_position_embeddings=_UpperCAmelCase , use_mask_token=_UpperCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowerCamelCase_: Optional[int] = 1_0_2_4
lowerCamelCase_: Any = 4_0_9_6
lowerCamelCase_: Optional[Any] = 2_4
lowerCamelCase_: Tuple = 1_6
# labels
if "rvlcdip" in checkpoint_url:
lowerCamelCase_: List[str] = 1_6
lowerCamelCase_: int = """huggingface/label-files"""
lowerCamelCase_: str = """rvlcdip-id2label.json"""
lowerCamelCase_: str = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase_: Dict = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase_: Union[str, Any] = idalabel
lowerCamelCase_: Dict = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowerCamelCase_: Optional[Any] = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location="""cpu""" )["""model"""]
lowerCamelCase_: int = create_rename_keys(_UpperCAmelCase , has_lm_head=_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase , has_lm_head=_UpperCAmelCase )
# load HuggingFace model
lowerCamelCase_: Tuple = BeitForMaskedImageModeling(_UpperCAmelCase ) if has_lm_head else BeitForImageClassification(_UpperCAmelCase )
model.eval()
model.load_state_dict(_UpperCAmelCase )
# Check outputs on an image
lowerCamelCase_: int = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_UpperCAmelCase )
lowerCamelCase_: Union[str, Any] = prepare_img()
lowerCamelCase_: List[str] = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" )
lowerCamelCase_: Any = encoding["""pixel_values"""]
lowerCamelCase_: str = model(_UpperCAmelCase )
lowerCamelCase_: List[Any] = outputs.logits
# verify logits
lowerCamelCase_: Dict = [1, 1_6] if """rvlcdip""" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(_UpperCAmelCase ), "Shape of logits not as expected"
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
if has_lm_head:
lowerCamelCase_: Optional[Any] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
lowerCamelCase_: Dict = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_UpperCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_UpperCAmelCase , )
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
lowercase : Any = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 584 | 
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase : Dict = 1_6
lowercase : Optional[int] = 3_2
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase = 1_6 ):
lowerCamelCase_: List[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCamelCase_: List[str] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_: Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_: List[Any] = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels', which is the name the models of the
# transformers library expect for labels
lowerCamelCase_: Optional[int] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_: Optional[int] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_: int = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_: List[str] = 8
else:
lowerCamelCase_: List[Any] = None
return tokenizer.pad(
_UpperCAmelCase , padding="""longest""" , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCamelCase_: int = DataLoader(
tokenized_datasets["""train"""] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
lowerCamelCase_: Optional[int] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
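# Hedged usage sketch (argument names assumed from the body above, which calls
# accelerator.main_process_first() and reads the batch size as the second parameter):
# train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)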
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase : Any = mocked_dataloaders # noqa: F811
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , _UpperCAmelCase ) == "1":
lowerCamelCase_: List[str] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: if using a custom `Tracker` class, it should be passed in here, e.g.:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_: int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
lowerCamelCase_: Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_: Tuple = config["""lr"""]
lowerCamelCase_: Optional[Any] = int(config["""num_epochs"""] )
lowerCamelCase_: int = int(config["""seed"""] )
lowerCamelCase_: Any = int(config["""batch_size"""] )
set_seed(_UpperCAmelCase )
lowerCamelCase_ , lowerCamelCase_: Tuple = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase_: List[Any] = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_: Dict = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_: Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_: Optional[Any] = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_: Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_: Dict = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_: str = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
# Instantiate scheduler
lowerCamelCase_: Optional[int] = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=1_0_0 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_: Dict = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_: int = os.path.split(_UpperCAmelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(_UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_: Tuple = 0
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_: Tuple = model(**_UpperCAmelCase )
lowerCamelCase_: Any = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_: Dict = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_: Dict = model(**_UpperCAmelCase )
lowerCamelCase_: Tuple = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_: List[str] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
lowerCamelCase_: List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _UpperCAmelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(_UpperCAmelCase ),
"""epoch""": epoch,
} , step=_UpperCAmelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def UpperCAmelCase_ ( ):
lowerCamelCase_: Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=_UpperCAmelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
lowerCamelCase_: Union[str, Any] = parser.parse_args()
lowerCamelCase_: Optional[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 584 | 1 |
def __lowercase ( _UpperCamelCase, _UpperCamelCase = " " ) ->list:
"""simple docstring"""
lowercase : List[str] = []
lowercase : Union[str, Any] = 0
for index, char in enumerate(_UpperCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
lowercase : List[str] = index + 1
elif index + 1 == len(_UpperCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
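# Expected behaviour of the function above (informal examples, not executed):
# split("apple#banana#cherry", "#") -> ["apple", "banana", "cherry"]
# split("Hello world") -> ["Hello", "world"]
# Note that a trailing separator yields no empty final element with this implementation.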
if __name__ == "__main__":
from doctest import testmod
testmod()
| 319 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {'''vocab_file''': '''vocab.txt'''}
__a = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
__a = {
'''facebook/esm2_t6_8M_UR50D''': 10_24,
'''facebook/esm2_t12_35M_UR50D''': 10_24,
}
def __lowercase ( _UpperCamelCase ) ->Tuple:
"""simple docstring"""
with open(_UpperCamelCase, '''r''' ) as f:
lowercase : List[Any] = f.read().splitlines()
return [l.strip() for l in lines]
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : List[str] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[str] = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<cls>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__="<eos>" , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : str = load_vocab_file(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = dict(enumerate(self.all_tokens ) )
lowercase : Tuple = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase : Tuple = unk_token
lowercase : Optional[Any] = cls_token
lowercase : Union[str, Any] = pad_token
lowercase : Dict = mask_token
lowercase : Dict = eos_token
lowercase : Any = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self._id_to_token.get(SCREAMING_SNAKE_CASE__ , self.unk_token )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self._token_to_id.get(SCREAMING_SNAKE_CASE__ , self._token_to_id.get(self.unk_token ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return text.split()
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=False ):
return len(self._id_to_token )
def __lowerCamelCase ( self ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self._token_to_id.get(SCREAMING_SNAKE_CASE__ , self._token_to_id.get(self.unk_token ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self._id_to_token.get(SCREAMING_SNAKE_CASE__ , self.unk_token )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : List[str] = [self.cls_token_id]
lowercase : Dict = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
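# Resulting layouts: a single sequence becomes [<cls>] + ids + [<eos>]; a pair becomes
# [<cls>] + ids_a + [<eos>] + ids_b + [<eos>] (ESM uses <eos> where BERT-style models use [SEP]).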
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase : Tuple = [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
if token_ids_a is not None:
mask += [0] * len(SCREAMING_SNAKE_CASE__ ) + [1]
return mask
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __lowerCamelCase ( self ):
return self.get_vocab_size(with_added_tokens=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
return super()._add_tokens(SCREAMING_SNAKE_CASE__ , special_tokens=SCREAMING_SNAKE_CASE__ )
| 319 | 1 |
"""simple docstring"""
import sys
a :Dict = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def _lowercase ( __lowerCAmelCase = N ) -> int:
SCREAMING_SNAKE_CASE__ : int = -sys.maxsize - 1
for i in range(len(__lowerCAmelCase ) - 12 ):
SCREAMING_SNAKE_CASE__ : str = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
SCREAMING_SNAKE_CASE__ : Optional[int] = product
return largest_product
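# Context from Project Euler #8: for a window of 4 adjacent digits the maximum product in
# this 1000-digit series is 9 * 9 * 8 * 9 = 5832; the function above uses the 13-digit
# window the problem actually asks for.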
if __name__ == "__main__":
print(f'{solution() = }')
| 12 |
"""simple docstring"""
a :List[str] = [
(1_000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def _lowercase ( __lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : List[str] = 0
while place < len(__lowerCAmelCase ):
if (place + 1 < len(__lowerCAmelCase )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
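# Worked example: "MCMXCIV" -> 1000 + (1000 - 100) + (100 - 10) + (5 - 1) = 1994, because
# each smaller-before-larger pair (CM, XC, IV) is handled by the subtractive branch above.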
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Any = []
for arabic, roman in ROMAN:
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : List[str] = divmod(__lowerCAmelCase , __lowerCAmelCase )
result.append(roman * factor )
if number == 0:
break
return "".join(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape
_lowerCamelCase : List[str] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
_lowerCamelCase : int = emb.weight.data
return lin_layer
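# Net effect (in the un-obfuscated original, the layer's weight data is set to
# emb.weight.data): the returned linear layer shares storage with the token-embedding
# matrix of shape (vocab_size, hidden_size), so applying it to decoder hidden states
# yields vocabulary logits -- i.e. a weight-tied LM head.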
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = torch.load(_lowerCAmelCase , map_location="cpu" )
_lowerCamelCase : List[Any] = mam_aaa["args"] or mam_aaa["cfg"]["model"]
_lowerCamelCase : Optional[Any] = mam_aaa["model"]
remove_ignore_keys_(_lowerCAmelCase )
_lowerCamelCase : int = state_dict["encoder.embed_tokens.weight"].shape[0]
_lowerCamelCase : str = MaMaaaConfig(
vocab_size=_lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
_lowerCamelCase : Optional[int] = state_dict["decoder.embed_tokens.weight"]
_lowerCamelCase : List[Any] = MaMaaaForConditionalGeneration(_lowerCAmelCase )
model.model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
UpperCAmelCase_ : List[str] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
 | 44 | 
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Union[str, Any] = 'convbert'
def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=9 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase__ : Dict = vocab_size
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : Dict = type_vocab_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
lowercase__ : Tuple = embedding_size
lowercase__ : List[str] = head_ratio
lowercase__ : Dict = conv_kernel_size
lowercase__ : Dict = num_groups
lowercase__ : int = classifier_dropout
class _snake_case ( UpperCAmelCase_ ):
@property
def lowercase__ ( self):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
| 12 | 0 |
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def lowercase_ ( __UpperCAmelCase ) -> List[str]:
return x + 2
class _lowerCamelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[str] = """x = 3"""
lowerCAmelCase__ : List[str] = {}
lowerCAmelCase__ : str = evaluate(UpperCamelCase , {} , state=UpperCamelCase )
assert result == 3
self.assertDictEqual(UpperCamelCase , {"""x""": 3} )
lowerCAmelCase__ : Dict = """x = y"""
lowerCAmelCase__ : List[Any] = {"""y""": 5}
lowerCAmelCase__ : str = evaluate(UpperCamelCase , {} , state=UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCamelCase , {"""x""": 5, """y""": 5} )
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = """y = add_two(x)"""
lowerCAmelCase__ : List[Any] = {"""x""": 3}
lowerCAmelCase__ : str = evaluate(UpperCamelCase , {"""add_two""": add_two} , state=UpperCamelCase )
assert result == 5
self.assertDictEqual(UpperCamelCase , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCAmelCase__ : str = evaluate(UpperCamelCase , {} , state=UpperCamelCase )
assert result is None
assert "tried to execute add_two" in out.out
def _lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = """x = 3"""
lowerCAmelCase__ : str = {}
lowerCAmelCase__ : Optional[Any] = evaluate(UpperCamelCase , {} , state=UpperCamelCase )
assert result == 3
self.assertDictEqual(UpperCamelCase , {"""x""": 3} )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = """test_dict = {'x': x, 'y': add_two(x)}"""
lowerCAmelCase__ : Union[str, Any] = {"""x""": 3}
lowerCAmelCase__ : Union[str, Any] = evaluate(UpperCamelCase , {"""add_two""": add_two} , state=UpperCamelCase )
self.assertDictEqual(UpperCamelCase , {"""x""": 3, """y""": 5} )
self.assertDictEqual(UpperCamelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def _lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : str = """x = 3\ny = 5"""
lowerCAmelCase__ : str = {}
lowerCAmelCase__ : List[Any] = evaluate(UpperCamelCase , {} , state=UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCamelCase , {"""x""": 3, """y""": 5} )
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = """text = f'This is x: {x}.'"""
lowerCAmelCase__ : Union[str, Any] = {"""x""": 3}
lowerCAmelCase__ : Tuple = evaluate(UpperCamelCase , {} , state=UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCamelCase , {"""x""": 3, """text""": """This is x: 3."""} )
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Dict = """if x <= 3:\n y = 2\nelse:\n y = 5"""
lowerCAmelCase__ : int = {"""x""": 3}
lowerCAmelCase__ : List[Any] = evaluate(UpperCamelCase , {} , state=UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCamelCase , {"""x""": 3, """y""": 2} )
lowerCAmelCase__ : Tuple = {"""x""": 8}
lowerCAmelCase__ : int = evaluate(UpperCamelCase , {} , state=UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCamelCase , {"""x""": 8, """y""": 5} )
def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = """test_list = [x, add_two(x)]"""
lowerCAmelCase__ : Optional[int] = {"""x""": 3}
lowerCAmelCase__ : int = evaluate(UpperCamelCase , {"""add_two""": add_two} , state=UpperCamelCase )
self.assertListEqual(UpperCamelCase , [3, 5] )
self.assertDictEqual(UpperCamelCase , {"""x""": 3, """test_list""": [3, 5]} )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = """y = x"""
lowerCAmelCase__ : List[Any] = {"""x""": 3}
lowerCAmelCase__ : List[str] = evaluate(UpperCamelCase , {} , state=UpperCamelCase )
assert result == 3
self.assertDictEqual(UpperCamelCase , {"""x""": 3, """y""": 3} )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = """test_list = [x, add_two(x)]\ntest_list[1]"""
lowerCAmelCase__ : Dict = {"""x""": 3}
lowerCAmelCase__ : List[str] = evaluate(UpperCamelCase , {"""add_two""": add_two} , state=UpperCamelCase )
assert result == 5
self.assertDictEqual(UpperCamelCase , {"""x""": 3, """test_list""": [3, 5]} )
lowerCAmelCase__ : Tuple = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
lowerCAmelCase__ : Optional[int] = {"""x""": 3}
lowerCAmelCase__ : Dict = evaluate(UpperCamelCase , {"""add_two""": add_two} , state=UpperCamelCase )
assert result == 5
self.assertDictEqual(UpperCamelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = """x = 0\nfor i in range(3):\n x = i"""
lowerCAmelCase__ : List[Any] = {}
lowerCAmelCase__ : str = evaluate(UpperCamelCase , {"""range""": range} , state=UpperCamelCase )
assert result == 2
self.assertDictEqual(UpperCamelCase , {"""x""": 2, """i""": 2} )
| 715 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCamelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = inspect.getfile(accelerate.test_utils )
lowerCAmelCase__ : Optional[Any] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCAmelCase__ : List[str] = test_metrics
@require_cpu
def _lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def _lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
self.test_metrics.main()
@require_multi_gpu
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices.""" )
lowerCAmelCase__ : str = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase , env=os.environ.copy() )
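# Roughly equivalent shell invocation (path shown schematically, not a literal command):
# OMP_NUM_THREADS=1 torchrun --nproc_per_node=<num_gpus> .../scripts/external_deps/test_metrics.py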
| 507 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( __lowercase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , )
def snake_case_ ( self , _lowerCAmelCase = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
self.enable_attention_slicing(_lowerCAmelCase )
@torch.no_grad()
def __call__( self , _lowerCAmelCase , _lowerCAmelCase = 512 , _lowerCAmelCase = 512 , _lowerCAmelCase = 50 , _lowerCAmelCase = 7.5 , _lowerCAmelCase = None , _lowerCAmelCase = 1 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = 1 , _lowerCAmelCase = None , **_lowerCAmelCase , ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: List[Any] = 1
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Tuple = len(_lowerCAmelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_lowerCAmelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_lowerCAmelCase )}.""" )
# get prompt text embeddings
__SCREAMING_SNAKE_CASE: List[str] = self.tokenizer(
_lowerCAmelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__SCREAMING_SNAKE_CASE: List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__SCREAMING_SNAKE_CASE: Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__SCREAMING_SNAKE_CASE: Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__SCREAMING_SNAKE_CASE: Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: str = text_embeddings.shape
__SCREAMING_SNAKE_CASE: Any = text_embeddings.repeat(1 , _lowerCAmelCase , 1 )
__SCREAMING_SNAKE_CASE: Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , _lowerCAmelCase , -1 )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__SCREAMING_SNAKE_CASE: str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE: List[str]
if negative_prompt is None:
__SCREAMING_SNAKE_CASE: List[str] = ['''''']
elif type(_lowerCAmelCase ) is not type(_lowerCAmelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_lowerCAmelCase )} !="""
f""" {type(_lowerCAmelCase )}.""" )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Tuple = [negative_prompt]
elif batch_size != len(_lowerCAmelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_lowerCAmelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
__SCREAMING_SNAKE_CASE: Tuple = negative_prompt
__SCREAMING_SNAKE_CASE: Optional[Any] = text_input_ids.shape[-1]
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.tokenizer(
_lowerCAmelCase , padding='''max_length''' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='''pt''' , )
__SCREAMING_SNAKE_CASE: List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE: Dict = uncond_embeddings.shape[1]
__SCREAMING_SNAKE_CASE: List[str] = uncond_embeddings.repeat(_lowerCAmelCase , _lowerCAmelCase , 1 )
__SCREAMING_SNAKE_CASE: List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowerCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__SCREAMING_SNAKE_CASE: List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__SCREAMING_SNAKE_CASE: List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__SCREAMING_SNAKE_CASE: List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__SCREAMING_SNAKE_CASE: Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__SCREAMING_SNAKE_CASE: List[str] = torch.randn(
_lowerCAmelCase , generator=_lowerCAmelCase , device='''cpu''' , dtype=_lowerCAmelCase ).to(self.device )
__SCREAMING_SNAKE_CASE: int = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device='''cpu''' , dtype=_lowerCAmelCase ).to(
self.device )
else:
__SCREAMING_SNAKE_CASE: Optional[Any] = torch.randn(
_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__SCREAMING_SNAKE_CASE: Optional[Any] = latents_reference.to(self.device )
__SCREAMING_SNAKE_CASE: List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__SCREAMING_SNAKE_CASE: int = (latents_shape[3] - latents_shape_reference[3]) // 2
__SCREAMING_SNAKE_CASE: Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2
__SCREAMING_SNAKE_CASE: List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__SCREAMING_SNAKE_CASE: str = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__SCREAMING_SNAKE_CASE: Optional[Any] = 0 if dx < 0 else dx
__SCREAMING_SNAKE_CASE: Dict = 0 if dy < 0 else dy
__SCREAMING_SNAKE_CASE: str = max(-dx , 0 )
__SCREAMING_SNAKE_CASE: Optional[Any] = max(-dy , 0 )
__SCREAMING_SNAKE_CASE: Union[str, Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
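# Net effect of the dx/dy bookkeeping above (the "seed resize" trick): the overlapping
# window of the reference noise is copied into the fresh latents, so the same seed
# produces visually similar images across different target resolutions.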
# set timesteps
self.scheduler.set_timesteps(_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__SCREAMING_SNAKE_CASE: List[str] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__SCREAMING_SNAKE_CASE: Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__SCREAMING_SNAKE_CASE: Dict = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__SCREAMING_SNAKE_CASE: int = {}
if accepts_eta:
__SCREAMING_SNAKE_CASE: Dict = eta
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
__SCREAMING_SNAKE_CASE: Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__SCREAMING_SNAKE_CASE: int = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
__SCREAMING_SNAKE_CASE: str = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Tuple = noise_pred.chunk(2 )
__SCREAMING_SNAKE_CASE: int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
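# Algebraically this is (1 - guidance_scale) * uncond + guidance_scale * text; values
# like 7.5 extrapolate past the conditional prediction, trading diversity for prompt adherence.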
# compute the previous noisy sample x_t -> x_t-1
__SCREAMING_SNAKE_CASE: Tuple = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = 1 / 0.18215 * latents
__SCREAMING_SNAKE_CASE: Tuple = self.vae.decode(_lowerCAmelCase ).sample
__SCREAMING_SNAKE_CASE: Tuple = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE: Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__SCREAMING_SNAKE_CASE: str = self.feature_extractor(self.numpy_to_pil(_lowerCAmelCase ) , return_tensors='''pt''' ).to(
self.device )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[int] = self.safety_checker(
images=_lowerCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__SCREAMING_SNAKE_CASE: List[str] = None
if output_type == "pil":
__SCREAMING_SNAKE_CASE: List[str] = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_lowerCAmelCase , nsfw_content_detected=_lowerCAmelCase )
| 202 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
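

# Minimal usage sketch: instantiating with no arguments yields the base-sized
# defaults shown above (e.g. hidden_size=768, 12 layers, 12 heads).
# config = Data2VecVisionConfig()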
| 202 | 1 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as file_handle:
        lines = file_handle.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
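

# Quick sanity check of the two helpers above (a hand-picked value, not taken
# from the Project Euler input file): 1994 is minimally written "MCMXCIV".
assert parse_roman_numerals("MCMXCIV") == 1994
assert generate_roman_numerals(1994) == "MCMXCIV"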
if __name__ == "__main__":
    print(f"{solution() = }")
 | 436 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
 | 436 | 1 |
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, optionally rounded to digit_amount digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
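

# For example, decimal_isolate(1.53, 0) returns 0.53 (the raw fractional part),
# while decimal_isolate(35.345, 1) rounds that part to one digit, i.e. roughly 0.3.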
if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
 | 578 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an s3prl checkpoint's weights into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
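

# Hypothetical invocation (script name, model id and paths are placeholders, not from this file):
# python convert_checkpoint.py --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./config.json --checkpoint_path ./s3prl.ckpt --model_dump_path ./converted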
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
 | 578 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 0
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : List[str] = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Optional[int] = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[int] = CLIPConfig()
# Create a dummy config file with image_proceesor_type
SCREAMING_SNAKE_CASE : List[Any] = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Tuple = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained(__UpperCamelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(**__UpperCamelCase )
# save in new folder
model_config.save_pretrained(__UpperCamelCase )
config.save_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(__UpperCamelCase )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : str = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowerCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(__UpperCamelCase , revision='''aaaaaa''' )
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(__UpperCamelCase , trust_remote_code=__UpperCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Optional[Any] = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = CustomImageProcessor.from_pretrained(__UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = True
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(__UpperCamelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
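
# The try/except blocks above make each backend optional: when tokenizers, torch,
# or TF is missing, the corresponding entry is simply left out of _import_structure,
# and the _LazyModule registered at the bottom defers all real imports until first use.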
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 0 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune hidden/underscore directories and "scripts" in place so os.walk skips them.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"
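

# For example, md_prefix(0) yields "\n##" (a new section heading), while
# md_prefix(2) yields "    *" (a bullet nested two levels deep).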
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
 | 31 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
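

# Minimal usage sketch: a three-node tree sums to 12 via depth-first search.
# tree = Node(10); tree.left = Node(5); tree.right = Node(-3)
# next(iter(BinaryTreeNodeSum(tree)))  # -> 12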
if __name__ == "__main__":
    import doctest

    doctest.testmod()
 | 31 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
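

# Hypothetical usage sketch (the checkpoint id is an assumption, not part of this file):
# sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# image = sde_ve(num_inference_steps=2000).images[0]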
| 497 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
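

# Note on the custom mapping above: the LDM "encoder.norm_out.*" / "decoder.norm_out.*"
# weights are renamed to "encoder.conv_norm_out.*" / "decoder.conv_norm_out.*", which is
# where diffusers' AutoencoderKL expects them; everything else follows the stock helpers.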
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 497 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 394 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
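

# For example, get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")}.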
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
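

# Hypothetical usage sketch (downloads the real vocab/merges files from the Hub):
# tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
# tokenizer.tokenize("Tôi là sinh_viên")  # expects word-segmented Vietnamese input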
| 394 | 1 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big-endian to little-endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert an int to its little-endian hexadecimal representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Convert the message to a bit string, then pad and append its length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-char blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Perform bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two numbers modulo 2^32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by the given shift."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-char little-endian hex MD5 digest of the message."""
    # Convert to bit string, add padding and append message length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
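

# Sanity check against the standard MD5 test vector for the empty message:
assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"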
if __name__ == "__main__":
import doctest
doctest.testmod()
 | 707 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the constructed distributions."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. the length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the distribution's support, used when padding data series. Default 0.0."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping the input to the distribution's parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Convert arguments to the right shape and domain."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs to the positive orthant via the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
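

# Minimal usage sketch (feature size and batch shape are illustrative assumptions):
# output = StudentTOutput(dim=1)
# projection = output.get_parameter_projection(in_features=32)
# distr_args = projection(torch.randn(8, 32))   # -> (df, loc, scale), each of shape (8,)
# distribution = output.distribution(distr_args)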
| 558 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
 | 39 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase : Optional[Any] = 256
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Union[str, Any] = ['''melgan''']
def __init__(self : Optional[Any] , A__ : SpectrogramNotesEncoder , A__ : SpectrogramContEncoder , A__ : TaFilmDecoder , A__ : DDPMScheduler , A__ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
lowercase = math.log(1e-5 ) # Matches MelGAN training.
lowercase = 4.0 # Largest value for most examples
lowercase = 1_2_8
self.register_modules(
notes_encoder=A__ , continuous_encoder=A__ , decoder=A__ , scheduler=A__ , melgan=A__ , )
def UpperCAmelCase__ (self : Union[str, Any] , A__ : Any , A__ : Tuple=(-1.0, 1.0) , A__ : Any=False ) -> Any:
lowercase , lowercase = output_range
if clip:
lowercase = torch.clip(A__ , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCAmelCase__ (self : Tuple , A__ : Any , A__ : List[str]=(-1.0, 1.0) , A__ : Any=False ) -> str:
lowercase , lowercase = input_range
lowercase = torch.clip(A__ , A__ , A__ ) if clip else outputs
# Scale to [0, 1].
lowercase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCAmelCase__ (self : List[str] , A__ : Optional[int] , A__ : Optional[Any] , A__ : List[Any] ) -> Dict:
lowercase = input_tokens > 0
lowercase , lowercase = self.notes_encoder(
encoder_input_tokens=A__ , encoder_inputs_mask=A__ )
lowercase , lowercase = self.continuous_encoder(
encoder_inputs=A__ , encoder_inputs_mask=A__ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCAmelCase__ (self : int , A__ : int , A__ : Optional[int] , A__ : List[Any] ) -> str:
lowercase = noise_time
if not torch.is_tensor(A__ ):
lowercase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A__ ) and len(timesteps.shape ) == 0:
lowercase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase = self.decoder(
encodings_and_masks=A__ , decoder_input_tokens=A__ , decoder_noise_time=A__ )
return logits
@torch.no_grad()
def __call__(self : int , A__ : List[List[int]] , A__ : Optional[torch.Generator] = None , A__ : int = 1_0_0 , A__ : bool = True , A__ : str = "numpy" , A__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A__ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A__ , A__ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(A__ )}.' )
lowercase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase = np.zeros([1, 0, self.n_dims] , np.floataa )
        lowercase = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=torch.bool, device=self.device)
for i, encoder_input_tokens in enumerate(A__ ):
if i == 0:
lowercase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
                lowercase = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=torch.bool, device=self.device)
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase = ones
lowercase = self.scale_features(
A__ , output_range=[-1.0, 1.0] , clip=A__ )
lowercase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A__ , continuous_mask=A__ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A__ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A__ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase = self.decode(
encodings_and_masks=A__ , input_tokens=A__ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase = self.scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
lowercase = self.scale_to_features(A__ , input_range=[-1.0, 1.0] )
lowercase = mel[:1]
lowercase = mel.cpu().float().numpy()
lowercase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A__ , A__ )
            logger.info("Generated segment", i)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
lowercase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A__ )
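# Hedged usage sketch (assumptions: the class above corresponds to diffusers'
# SpectrogramDiffusionPipeline, and the checkpoint id and inputs below are
# illustrative, not taken from this file):
#
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   output = pipe(input_tokens, num_inference_steps=100, output_type="numpy")
#   audio = output.audios[0]  # waveform decoded from the predicted mel spectrogram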
| 310 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks each process: 0 means it has not finished yet, 1 means it has.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        # If the CPU is idle, jump ahead to the next arrival.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
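# HRRN picks the ready process with the highest response ratio,
#     response ratio = (waiting time + burst time) / burst time,
# e.g. a process that has waited 6 units for a 2-unit burst scores (6 + 2) / 2 = 4.0,
# so long-waiting short jobs are favored and starvation is avoided.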
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
f'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(f'average waiting time : {mean(waiting_time):.5f}')
print(f'average turn around time : {mean(turn_around_time):.5f}')
| 607 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowercase = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
    clean_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
snake_case_ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def _UpperCamelCase ( self ) -> Optional[Any]:
snake_case_ = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
snake_case_ = run_command(self._launch_args + testargs , return_stdout=a )
self.assertNotIn('epoch 0:' , a )
self.assertIn('epoch 1:' , a )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
snake_case_ = run_command(self._launch_args + testargs , return_stdout=a )
if torch.cuda.is_available():
snake_case_ = torch.cuda.device_count()
else:
snake_case_ = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , a )
self.assertIn('epoch 1:' , a )
else:
self.assertIn('epoch 0:' , a )
self.assertIn('epoch 1:' , a )
@slow
def _UpperCamelCase ( self ) -> int:
snake_case_ = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
snake_case_ = run_command(self._launch_args + testargs , return_stdout=a )
snake_case_ = re.findall('({.+})' , a )
snake_case_ = [r for r in results if 'accuracy' in r][-1]
snake_case_ = ast.literal_eval(a )
self.assertGreaterEqual(results['accuracy'] , 0.75 )
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _UpperCamelCase ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
snake_case_ = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(a , 'tracking' ) ) )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
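# To run these example-script tests locally (assumption: executed from the accelerate
# repository root, where the `examples/` folder referenced above exists, and with the
# test file at its usual path):
#
#   python -m pytest tests/test_examples.py -k "checkpointing"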
| 607 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = tempfile.mkdtemp()
# fmt: off
__a = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__a = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
__a = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__a = {'unk_token': '<unk>'}
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase ) )
__a = {
'do_resize': True,
'size': 2_0,
'do_center_crop': True,
'crop_size': 1_8,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
__a = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase ) -> Optional[Any]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase ) -> int:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase ) -> Optional[Any]:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
__a = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = self.get_image_processor()
__a = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
__a = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
__a = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
__a = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__a = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
__a = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__a = self.prepare_image_inputs()
__a = image_processor(UpperCAmelCase , return_tensors='np' )
__a = processor(images=UpperCAmelCase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__a = 'lower newer'
__a = processor(text=UpperCAmelCase )
__a = tokenizer(UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__a = 'lower newer'
__a = self.prepare_image_inputs()
__a = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(UpperCAmelCase )
__a = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__a = 'lower newer'
__a = self.prepare_image_inputs()
__a = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
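# Hedged usage sketch (the tiny vocab above is test-only; "openai/clip-vit-base-patch32"
# is a real checkpoint, while `image` is an illustrative PIL image):
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
#   # inputs contains: input_ids, attention_mask, pixel_values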
| 559 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
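# Worked examples: 12 = 4 + 4 + 4 needs three squares, while 13 = 4 + 9 needs two, so
# minimum_squares_to_represent_a_number(12) == 3 and
# minimum_squares_to_represent_a_number(13) == 2.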
if __name__ == "__main__":
import doctest
doctest.testmod()
| 559 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
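# Hedged shape sketch (batch size, sequence length and dims below are illustrative):
#
#   model = UnCLIPTextProjModel(clip_extra_context_tokens=4, clip_embeddings_dim=768,
#                               time_embed_dim=1536, cross_attention_dim=2048)
#   text_enc_hidden_states, additive_time_embeddings = model(
#       image_embeddings=torch.randn(2, 768),
#       prompt_embeds=torch.randn(2, 768),
#       text_encoder_hidden_states=torch.randn(2, 77, 2048),
#       do_classifier_free_guidance=False,
#   )
#   # text_enc_hidden_states: (2, 4 + 77, 2048); additive_time_embeddings: (2, 1536)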
| 515 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **calc_rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **calc_rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
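# Hedged CLI usage (the script and file names below are illustrative):
#
#   python calculate_rouge_path.py predictions.txt references.txt --save_path rouge.json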
| 515 | 1 |
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
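# Note: a composite `num` survives each random base with probability at most 1/4, so
# after the 5 rounds above a composite slips through with probability <= 4 ** -5
# (about 0.1%).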
def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
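# A value from generate_large_prime(1024) is a ~1024-bit probable prime; pairing two
# such primes is the usual starting point for a 2048-bit RSA modulus.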
if __name__ == "__main__":
    num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 379 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = parent
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {}
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
SCREAMING_SNAKE_CASE : int = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_a, html_string_a]
@require_bsa
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MarkupLMFeatureExtractor if is_bsa_available() else None
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MarkupLMFeatureExtractionTester(self )
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return self.feature_extract_tester.prepare_feat_extract_dict()
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class()
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = get_html_strings()[0]
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCamelCase_ )
# fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
SCREAMING_SNAKE_CASE : List[str] = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
# fmt: on
self.assertEqual(encoding.nodes , lowerCamelCase_ )
self.assertEqual(encoding.xpaths , lowerCamelCase_ )
# Test batched
SCREAMING_SNAKE_CASE : str = get_html_strings()
SCREAMING_SNAKE_CASE : str = feature_extractor(lowerCamelCase_ )
# fmt: off
SCREAMING_SNAKE_CASE : int = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
SCREAMING_SNAKE_CASE : List[Any] = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , lowerCamelCase_ )
self.assertEqual(encoding.xpaths , lowerCamelCase_ )
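# Hedged usage sketch (assumption: pairing the feature extractor with a MarkupLM
# processor/tokenizer as in the transformers docs; the HTML below is illustrative):
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><h1>Hello</h1></body></html>")
#   # encoding.nodes -> [["Hello"]]; encoding.xpaths -> [["/html/body/h1"]]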
| 379 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
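# Hedged example of the environment values this helper inspects ("partitions" and
# "sagemaker_mpi_enabled" are the fields actually checked above; the other keys and
# numbers are illustrative):
#
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'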
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class A ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase : str = field(
default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
def UpperCAmelCase__ ( self : int):
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , _UpperCamelCase , )
@cached_property
def UpperCAmelCase__ ( self : Dict):
logger.info("PyTorch: setting up devices")
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
if self.no_cuda:
_lowercase: Optional[Any] = torch.device("cpu")
_lowercase: List[str] = 0
elif is_sagemaker_model_parallel_available():
_lowercase: Any = smp.local_rank()
_lowercase: Tuple = torch.device("cuda" , _UpperCamelCase)
_lowercase: int = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta)
_lowercase: str = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
_lowercase: Optional[int] = torch.device("cuda" , self.local_rank)
_lowercase: Dict = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
_lowercase: int = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
_lowercase: Dict = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta)
_lowercase: Optional[Any] = torch.device("cuda" , self.local_rank)
_lowercase: str = 1
if device.type == "cuda":
torch.cuda.set_device(_UpperCamelCase)
return device
@property
def UpperCAmelCase__ ( self : str):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def UpperCAmelCase__ ( self : Tuple):
return not is_sagemaker_model_parallel_available()
@property
def UpperCAmelCase__ ( self : Tuple):
return False
| 206 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
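# Known values for checking: _calculate(3, 0, 0) == 19 and solution(30) == 1918080
# (Project Euler problem 191).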
if __name__ == "__main__":
print(solution())
| 206 | 1 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ) -> str:
'''simple docstring'''
_A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
_A = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert('RGB' )
_A = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
] )
_A = transform(_snake_case ).unsqueeze(0 ).to(_snake_case )
return image
def _snake_case ( _snake_case : List[str] ) -> Dict:
'''simple docstring'''
if "visual_encoder" in key:
_A = re.sub('visual_encoder*' , 'vision_model.encoder' , _snake_case )
if "blocks" in key:
_A = re.sub(R'blocks' , 'layers' , _snake_case )
if "attn" in key:
_A = re.sub(R'attn' , 'self_attn' , _snake_case )
if "norm1" in key:
_A = re.sub(R'norm1' , 'layer_norm1' , _snake_case )
if "norm2" in key:
_A = re.sub(R'norm2' , 'layer_norm2' , _snake_case )
if "encoder.norm" in key:
_A = re.sub(R'encoder.norm' , 'post_layernorm' , _snake_case )
if "encoder.patch_embed.proj" in key:
_A = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , _snake_case )
if "encoder.pos_embed" in key:
_A = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , _snake_case )
if "encoder.cls_token" in key:
_A = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , _snake_case )
if "self_attn" in key:
_A = re.sub(R'self_attn.proj' , 'self_attn.projection' , _snake_case )
return key
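# Example of the mapping above:
#   "visual_encoder.blocks.0.attn.qkv.weight"
#       -> "vision_model.encoder.layers.0.self_attn.qkv.weight"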
@torch.no_grad()
def _snake_case ( _snake_case : Optional[int] , _snake_case : List[str]=None ) -> Any:
'''simple docstring'''
if config_path is not None:
_A = BlipConfig.from_pretrained(_snake_case )
else:
_A = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
_A = BlipForConditionalGeneration(_snake_case ).eval()
_A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
_A = blip_decoder(pretrained=_snake_case , image_size=3_84 , vit='base' )
_A = pt_model.eval()
_A = pt_model.state_dict()
for key in modified_state_dict.copy():
_A = modified_state_dict.pop(_snake_case )
_A = rename_key(_snake_case )
_A = value
hf_model.load_state_dict(_snake_case )
_A = 3_84
_A = load_demo_image(image_size=_snake_case , device='cpu' )
_A = BertTokenizer.from_pretrained('bert-base-uncased' )
_A = tokenizer(['a picture of'] ).input_ids
_A = hf_model.generate(_snake_case , _snake_case )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
_A = hf_model.generate(_snake_case )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_snake_case )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_A = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
_A = blip_vqa(pretrained=_snake_case , image_size=_snake_case , vit='base' )
vqa_model.eval()
_A = vqa_model.state_dict()
for key in modified_state_dict.copy():
_A = modified_state_dict.pop(_snake_case )
_A = rename_key(_snake_case )
_A = value
_A = BlipForQuestionAnswering(_snake_case )
hf_vqa_model.load_state_dict(_snake_case )
_A = ['How many dogs are in this image?']
_A = tokenizer(_snake_case , return_tensors='pt' ).input_ids
_A = hf_vqa_model.generate(_snake_case , _snake_case )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
_A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
_A = blip_itm(pretrained=_snake_case , image_size=_snake_case , vit='base' )
itm_model.eval()
_A = itm_model.state_dict()
for key in modified_state_dict.copy():
_A = modified_state_dict.pop(_snake_case )
_A = rename_key(_snake_case )
_A = value
_A = BlipForImageTextRetrieval(_snake_case )
_A = ['A picture of a woman with a dog sitting in a beach']
_A = tokenizer(
_snake_case , return_tensors='pt' , padding='max_length' , truncation=_snake_case , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_snake_case )
hf_itm_model.eval()
_A = hf_itm_model(_snake_case , _snake_case , use_itm_head=_snake_case )
_A = hf_itm_model(_snake_case , _snake_case , use_itm_head=_snake_case )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
a = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 7 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
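# A successful post to a Slack incoming-webhook URL returns HTTP 200 with the body "ok".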
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 23 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = """▁"""
UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( snake_case__ , unittest.TestCase ):
_a : str = BigBirdTokenizer
_a : Tuple = BigBirdTokenizerFast
_a : Union[str, Any] = True
_a : Any = True
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().setUp()
__lowerCAmelCase = self.tokenizer_class(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "<s>"
__lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(_A ) , 1_0_0_4 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.tokenize(_A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = BigBirdTokenizer(_A , keep_accents=_A )
__lowerCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(_A , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
__lowerCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "Hello World!"
__lowerCAmelCase = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
__lowerCAmelCase = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
# fmt: on
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__lowerCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
__lowerCAmelCase = " ".join(_A )
__lowerCAmelCase = self.big_tokenizer.encode_plus(_A , return_tensors="pt" , return_token_type_ids=_A )
__lowerCAmelCase = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=_A )
__lowerCAmelCase = BigBirdConfig(attention_type="original_full" )
__lowerCAmelCase = BigBirdModel(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
__lowerCAmelCase = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 552 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class a__ :
def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=False , _A=True , _A="None" , _A=3 , _A=4 , _A=None , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = relative_attention
__lowerCAmelCase = position_biased_input
__lowerCAmelCase = pos_att_type
__lowerCAmelCase = scope
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDebertaVaModel(config=_A )
__lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__lowerCAmelCase = [input_ids, input_mask]
__lowerCAmelCase = model(_A )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDebertaVaForMaskedLM(config=_A )
__lowerCAmelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFDebertaVaForSequenceClassification(config=_A )
__lowerCAmelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFDebertaVaForTokenClassification(config=_A )
__lowerCAmelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFDebertaVaForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        self.assertIsNotNone(model )
@require_tf
class TFDebertaVaModelIntegrationTest( unittest.TestCase ):
    @unittest.skip(reason="Model not available yet" )
    def test_inference_masked_lm( self ):
        """simple docstring"""
        pass
    @slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 )
| 552 | 1 |
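For reference, the integration check above can be run standalone. A minimal sketch using the public transformers class name (TFDebertaV2Model, aliased TFDebertaVaModel in this snippet); the printed hidden size is an assumption to verify against the checkpoint config:

import tensorflow as tf
from transformers import TFDebertaV2Model

model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
input_ids = tf.constant([[0, 31414, 232, 328, 2]])
attention_mask = tf.ones_like(input_ids)
hidden_states = model(input_ids, attention_mask=attention_mask)[0]
print(hidden_states.shape)  # expected (1, 5, hidden_size); 1536 for the xlarge config (assumed)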
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline( ChunkPipeline ):
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
        requires_backends(self , "vision" )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self , image , candidate_labels = None , **kwargs ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries" )
        if isinstance(image , (str, Image.Image) ):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def _sanitize_parameters( self , **kwargs ):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess( self , inputs ):
        image = load_image(inputs["image"] )
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split("," )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward( self , model_inputs ):
        target_size = model_inputs.pop("target_size" )
        candidate_label = model_inputs.pop("candidate_label" )
        is_last = model_inputs.pop("is_last" )
        outputs = self.model(**model_inputs )
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["target_size"] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0] )
                result = {"score": score, "label": label, "box": box}
                results.append(result )
        results = sorted(results , key=lambda x: x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box( self , box ):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 72 |
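A brief usage sketch for the zero-shot object detection pipeline defined above; the OWL-ViT checkpoint and image URL are illustrative assumptions, not taken from this file:

from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")  # checkpoint is an assumption
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for prediction in predictions:
    # each prediction has the score/label/box shape built in postprocess above
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])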
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class PixaStructTextConfig( PretrainedConfig ):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , vocab_size=50244 , hidden_size=768 , d_kv=64 , d_ff=2048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type" ) == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructVisionConfig( PretrainedConfig ):
    model_type = "pix2struct_vision_model"
    def __init__( self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1E-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1E-10 , initializer_factor=1.0 , seq_len=4096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type" ) == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructConfig( PretrainedConfig ):
    model_type = "pix2struct"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
        self.text_config = PixaStructTextConfig(**text_config )
        self.vision_config = PixaStructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 72 | 1 |
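A quick sketch of composing the configs above; the reduced layer counts are illustrative choices, everything else follows the defaults defined in this file:

text_config = PixaStructTextConfig(num_layers=2, num_heads=2)
vision_config = PixaStructVisionConfig(num_hidden_layers=2)
config = PixaStructConfig.from_text_vision_configs(text_config, vision_config)
print(config.decoder_start_token_id)  # 0, propagated from the text config
print(config.to_dict()["model_type"])  # "pix2struct"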
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1 , n + 1))
    square_of_sum = int(math.pow(sum(range(1 , n + 1)) , 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 155 |
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 155 | 1 |
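Both implementations above compute the Project Euler 6 difference in O(n); a constant-time variant using the standard identities sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6 (an addition for illustration, not part of the original snippets):

def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

assert solution_closed_form(10) == 2640  # matches the Project Euler worked example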
"""simple docstring"""
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 163 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status( target_runners , token ):
    """simple docstring"""
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("utf-8" )
    status = json.loads(o )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
    def list_str( values ):
        """simple docstring"""
        return values.split("," )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 388 | 0 |
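For context, the script above shells out to GitHub's self-hosted runners endpoint; a trimmed sketch of the JSON shape it parses (field names per the public GitHub REST API, values illustrative):

example_status = {
    "total_count": 2,
    "runners": [
        {"id": 1, "name": "ci-runner-docker", "os": "linux", "status": "online", "busy": False},
        {"id": 2, "name": "ci-runner-gpu", "os": "linux", "status": "offline", "busy": False},
    ],
}
# With target_runners=["ci-runner-gpu"], get_runner_status would record the offline
# runner in offline_runners.txt and raise ValueError naming it.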
from __future__ import annotations
from random import choice
def random_pivot( lst ):
    """simple docstring"""
    return choice(lst )
def kth_number( lst , k ) -> int:
    """simple docstring"""
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(small , k )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 372 |
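A quick usage check for kth_number; note the partition keeps only elements strictly smaller or larger than the pivot, so the input is assumed to contain distinct values. Average-case time is linear, since each call recurses into only one partition:

print(kth_number([5, 2, 9, 1, 7], 1))  # 1, the smallest element
print(kth_number([5, 2, 9, 1, 7], 3))  # 5, the median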
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '.'
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def test_stable_diffusion_2( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '.'
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def test_stable_diffusion_2_non_square( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '.'
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 372 | 1 |
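For completeness, the inference pattern these tests exercise, as a standalone sketch (the CUDA device and fp16 dtype are assumptions; sag_scale > 0 enables self-attention guidance):

import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16
).to("cuda")
image = pipe("a photo of an astronaut riding a horse", sag_scale=0.75).images[0]
image.save("sag_sample.png")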