| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82..53.2k | int64 0..721 | stringlengths 91..41.9k | int64 0..699 | int64 0..1 |
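A minimal sketch of reading one row of a dataset with this schema via the `datasets` library. The repository id below is a placeholder, since this dump does not name its source dataset.

```python
from datasets import load_dataset

# hypothetical repo id -- the dump does not identify the source dataset
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # the obfuscated Python snippet itself
```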
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode a lowercase string as 1-based alphabet positions (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of 1-based alphabet positions back into a string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    """Round-trip a user-provided string through encode and decode."""
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
| 328 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
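    # get_expected_values mirrors the processor's shortest-edge resizing to predict the output (height, width)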
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple:
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self: Tuple ) -> List[str]:
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self: str ) -> Any:
# prepare image and target
snake_case__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {'image_id': 3_97_69, 'annotations': target}
# encode them
snake_case__ = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
snake_case__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase )
snake_case__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase )
snake_case__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase ) )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase ) )
@slow
def lowerCAmelCase_ ( self: List[Any] ) -> Dict:
# prepare image, target and masks_path
snake_case__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
snake_case__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
snake_case__ = ConditionalDetrImageProcessor(format='coco_panoptic' )
snake_case__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase )
snake_case__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase )
snake_case__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase ) )
# verify masks
snake_case__ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , UpperCamelCase )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase ) )
| 328 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
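# Number of classification labels for each GLUE task, used to size the sequence-classification head.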
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 706 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
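# Timing decorator: the wrapped function's return value is replaced by its wall-clock duration in seconds.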
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 431 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
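        # one forward pass is prepared per candidate label; the ChunkPipeline base class runs them all and collects the outputs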
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
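    # postprocess (below) merges the per-label outputs into one ranked list of {"score", "label", "box"} dicts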
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            outputs = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=outputs, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
| 519 |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (by this convention, fibonacci(1) == 0)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci number to contain n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 519 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        crop_pct=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, crop_pct, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        crop_pct=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
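        # Arguments left as None fall back to the defaults stored on the processor instance.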
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 708 |

"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(_UpperCAmelCase )
lowercase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.model_class(**_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = batch_size
lowercase__ = embedding_dim
lowercase__ = num_embeddings
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(_UpperCAmelCase )
lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
lowercase__ = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
lowercase__ = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
| 668 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self : Any ):
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , """image_mean""" ) )
self.assertTrue(hasattr(A__ , """image_std""" ) )
self.assertTrue(hasattr(A__ , """do_normalize""" ) )
self.assertTrue(hasattr(A__ , """do_resize""" ) )
self.assertTrue(hasattr(A__ , """size""" ) )
def a_ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__lowerCamelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a_ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
__lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__lowerCamelCase : int = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
__lowerCamelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__lowerCamelCase : Optional[int] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self : Any ):
"""simple docstring"""
__lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
__lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__lowerCamelCase : Tuple = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 150 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return the torch.nn activation module matching the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 150 | 1 |
def longest_common_subsequence(x: str, y: str):
    """Return (length, subsequence) for the longest common subsequence of x and y."""
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()

| 55 |
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000 (Project Euler 48)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())

| 55 | 1 |
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
| 648 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a : Optional[int] = XCLIPTextConfig()
# derive patch size from model name
__a : List[str] = model_name.find('patch' )
__a : int = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
__a : Optional[Any] = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE__ , num_frames=SCREAMING_SNAKE_CASE__ )
if "large" in model_name:
__a : List[Any] = 768
__a : List[str] = 3072
__a : str = 12
__a : str = 1024
__a : Optional[int] = 4096
__a : Optional[Any] = 16
__a : str = 24
__a : List[str] = 768
__a : Union[str, Any] = 3072
if model_name == "xclip-large-patch14-16-frames":
__a : Optional[int] = 336
__a : Dict = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "large" in model_name:
__a : Any = 768
return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for key in orig_state_dict.copy().keys():
__a : Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "attn.in_proj" in key:
__a : Dict = key.split('.' )
if key.startswith('visual' ):
__a : Optional[Any] = key_split[3]
__a : int = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__a : List[Any] = val[
:dim, :
]
__a : List[Any] = val[
dim : dim * 2, :
]
__a : List[str] = val[
-dim:, :
]
else:
__a : Dict = val[
:dim
]
__a : Dict = val[
dim : dim * 2
]
__a : List[Any] = val[
-dim:
]
else:
if "weight" in key:
__a : Optional[int] = val[
:dim, :
]
__a : str = val[
dim : dim * 2, :
]
__a : str = val[
-dim:, :
]
else:
__a : Union[str, Any] = val[:dim]
__a : List[Any] = val[
dim : dim * 2
]
__a : Optional[int] = val[-dim:]
elif key.startswith('mit' ):
__a : Any = key_split[2]
__a : Dict = config.vision_config.mit_hidden_size
if "weight" in key:
__a : List[Any] = val[:dim, :]
__a : Union[str, Any] = val[dim : dim * 2, :]
__a : Tuple = val[-dim:, :]
else:
__a : Optional[int] = val[:dim]
__a : str = val[dim : dim * 2]
__a : Dict = val[-dim:]
else:
__a : Union[str, Any] = key_split[2]
__a : Any = config.text_config.hidden_size
if "weight" in key:
__a : Tuple = val[:dim, :]
__a : int = val[
dim : dim * 2, :
]
__a : List[Any] = val[-dim:, :]
else:
__a : Dict = val[:dim]
__a : Dict = val[
dim : dim * 2
]
__a : Union[str, Any] = val[-dim:]
else:
__a : Dict = rename_key(SCREAMING_SNAKE_CASE__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__a : str = val.T
__a : Union[str, Any] = val
return orig_state_dict
def prepare_video(num_frames):
    # pick the spaghetti-video fixture whose frame count matches num_frames
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False ):
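# map of released X-CLIP checkpoint names to their download URLs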
__a : Tuple = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
__a : str = model_to_url[model_name]
__a : Optional[Any] = 8
if "16-frames" in model_name:
__a : List[Any] = 16
elif "shot" in model_name:
__a : List[Any] = 32
__a : Optional[int] = get_xclip_config(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : List[Any] = XCLIPModel(SCREAMING_SNAKE_CASE__ )
model.eval()
if "drive" in checkpoint_url:
__a : Union[str, Any] = 'pytorch_model.bin'
gdown.cached_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , quiet=SCREAMING_SNAKE_CASE__ )
__a : Tuple = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )['model']
else:
__a : Any = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ )['model']
__a : Dict = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Dict = XCLIPModel(SCREAMING_SNAKE_CASE__ )
__a , __a : Any = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__a : Dict = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
__a : Tuple = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
__a : str = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
__a : int = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
__a : Dict = prepare_video(SCREAMING_SNAKE_CASE__ )
__a : Any = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=SCREAMING_SNAKE_CASE__ , return_tensors='pt' , padding=SCREAMING_SNAKE_CASE__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
__a : List[Any] = model(**SCREAMING_SNAKE_CASE__ )
# Verify outputs
__a : int = outputs.logits_per_video
__a : Optional[int] = logits_per_video.softmax(dim=1 )
print('Probs:' , SCREAMING_SNAKE_CASE__ )
# kinetics-400
if model_name == "xclip-base-patch32":
__a : str = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__a : Optional[int] = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] )
elif model_name == "xclip-base-patch16":
__a : Any = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__a : Tuple = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] )
elif model_name == "xclip-large-patch14":
__a : Tuple = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__a : Tuple = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__a : List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__a : str = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__a : List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__a : Optional[int] = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__a : Union[str, Any] = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__a : Dict = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__a : str = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__a : str = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__a : str = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__a : Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__a : Any = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__a : str = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing model, processor and slow tokenizer files to the hub...' )
        model.push_to_hub(model_name , organization='nielsr' )
        processor.push_to_hub(model_name , organization='nielsr' )
        slow_tokenizer.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
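    # Example invocation (the script file name and output path are
    # illustrative, not taken from this repository):
    #   python convert_x_clip_checkpoint.py --model_name xclip-base-patch32 \
    #       --pytorch_dump_folder_path /tmp/xclip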
| 597 | 0 |
def solution ( min_total = 10**12 ):
    """simple docstring"""
    # Pell-equation solutions: at each step `numerator` equals
    # 2 * total_discs - 1 and `denominator` equals 2 * blue_discs - 1 for an
    # arrangement where P(two blue) is exactly 1/2.
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
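

# Hand-check of the recurrence (values from the Project Euler 100 statement,
# not from this repository): the first arrangement with more than 21 discs
# in total has 85 blue discs out of 120.
assert solution(21) == 85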
if __name__ == "__main__":
print(f'''{solution() = }''')
| 710 | from __future__ import annotations
def prime_sieve ( limit ):
    """simple docstring"""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution ( ceiling = 100_0000 ):
    """simple docstring"""
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
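

# Hand-check (example from the Project Euler 50 statement): below 100 the
# longest run of consecutive primes summing to a prime is
# 2 + 3 + 5 + 7 + 11 + 13 = 41.
assert solution(100) == 41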
if __name__ == "__main__":
print(f'''{solution() = }''')
| 525 | 0 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg] , hint) | 52 | import os
def A__ ( lowercase: str = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(lowercase ), lowercase ) ) as input_file:
A : Dict =[
[int(lowercase ) for element in line.split(',' )]
for line in input_file.readlines()
]
A : Optional[int] =len(lowercase )
A : Optional[int] =len(matrix[0] )
A : Optional[int] =[[-1 for _ in range(lowercase )] for _ in range(lowercase )]
for i in range(lowercase ):
A : Optional[int] =matrix[i][0]
for j in range(1, lowercase ):
for i in range(lowercase ):
A : Optional[int] =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1, lowercase ):
A : Union[str, Any] =min(
minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2, -1, -1 ):
A : Tuple =min(
minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
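

# Worked example (hypothetical helper `_solve`, not part of the original
# script): the same three-way DP applied in memory to the 5x5 example matrix
# from the Project Euler 82 statement, whose minimal path sum is 994.
def _solve(matrix):
    rows, cols = len(matrix), len(matrix[0])
    sums = [row[0] for row in matrix]  # best cost to reach column 0
    for j in range(1, cols):
        sums = [sums[i] + matrix[i][j] for i in range(rows)]  # step right
        for i in range(1, rows):  # relax downward moves
            sums[i] = min(sums[i], sums[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # relax upward moves
            sums[i] = min(sums[i], sums[i + 1] + matrix[i][j])
    return min(sums)


assert _solve([
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]) == 994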
if __name__ == "__main__":
print(f'''{solution() = }''')
| 305 | 0 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class TaTokenizer ( PreTrainedTokenizer ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=1_00,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("""extra_id""" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
                    """ tokens""" )
        if legacy:
            logger.warning_once(
                f"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
                """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy=legacy , **kwargs , )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    f""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , FutureWarning , )
        return max_model_length

    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def get_sentinel_tokens( self ):
        return list(
            set(filter(lambda x: bool(re.search(R"""<extra_id_\d+>""" , x ) ) is not None , self.additional_special_tokens ) ) )

    def get_sentinel_token_ids( self ):
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present( self , token_ids ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                """ eos tokens being added.""" )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1

    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def tokenize( self , text: "TextInput" , **kwargs ):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , """ """ )
        return super().tokenize(text , **kwargs )

    def _tokenize( self , text , **kwargs ):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text , out_type=str )
        if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id( self , token ):
        if token.startswith("""<extra_id_""" ):
            match = re.match(R"""<extra_id_(\d+)>""" , token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self , index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        else:
            token = f"""<extra_id_{self.vocab_size - 1 - index}>"""
        return token

    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
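

# Usage sketch (commented out to avoid network/file access at import time;
# "t5-small" is the public checkpoint name, shown only for illustration):
# tokenizer = TaTokenizer.from_pretrained("t5-small")
# tokenizer("Translate English to German: hello")["input_ids"]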
| 330 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig ( PretrainedConfig ):
    '''simple docstring'''

    model_type = """nllb-moe"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=12_81_12,
        max_position_embeddings=10_24,
        encoder_layers=12,
        encoder_ffn_dim=40_96,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=40_96,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0_5,
        decoder_layerdrop=0.0_5,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=10_24,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.0_2,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=1_28,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.0_0_1,
        router_aux_loss_coef=0.0_0_1,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
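

# Usage sketch: build the default configuration and read back two of the
# mixture-of-experts fields defined above.
# config = NllbMoeConfig()
# assert (config.num_experts, config.expert_capacity) == (128, 64)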
| 330 | 1 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    '''simple docstring'''
    env_level_str = os.getenv('''DATASETS_VERBOSITY''' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
                f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
    return _default_log_level


def _get_library_name() -> str:
    '''simple docstring'''
    return __name__.split('''.''' )[0]


def _get_library_root_logger() -> logging.Logger:
    '''simple docstring'''
    return logging.getLogger(_get_library_name() )


def _configure_library_root_logger() -> None:
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )


def _reset_library_root_logger() -> None:
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )


def get_logger( name = None ) -> logging.Logger:
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )


def get_verbosity() -> int:
    '''simple docstring'''
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity( verbosity ) -> None:
    '''simple docstring'''
    _get_library_root_logger().setLevel(verbosity )


def set_verbosity_info():
    '''simple docstring'''
    return set_verbosity(INFO )


def set_verbosity_warning():
    '''simple docstring'''
    return set_verbosity(WARNING )


def set_verbosity_debug():
    '''simple docstring'''
    return set_verbosity(DEBUG )


def set_verbosity_error():
    '''simple docstring'''
    return set_verbosity(ERROR )


def disable_propagation() -> None:
    '''simple docstring'''
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    '''simple docstring'''
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    '''simple docstring'''

    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__( self ):
        return iter(self._iterator )

    def __getattr__( self , attr ):
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__( self ):
        return self

    def __exit__( self , type_ , value , traceback ):
        return


_tqdm_active = True


class _tqdm_cls:
    '''simple docstring'''

    def __call__( self , *args , disable = False , **kwargs ):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )

    def set_lock( self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )

    def get_lock( self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    '''simple docstring'''
    global _tqdm_active
    return bool(_tqdm_active )


def enable_progress_bar():
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = False
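

# Usage sketch for the helpers above:
# logger = get_logger(__name__)
# set_verbosity_warning()
# for _ in tqdm(range(3)):   # honors enable/disable_progress_bar()
#     pass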
| 393 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
A_ = get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 )-> List[str]:
'''simple docstring'''
os.makedirs(UpperCAmelCase ,exist_ok=UpperCAmelCase )
with FSDP.state_dict_type(
UpperCAmelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(UpperCAmelCase ,exist_ok=UpperCAmelCase )
logger.info(f'''Saving model to {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=UpperCAmelCase ,storage_writer=dist_cp.FileSystemWriter(UpperCAmelCase ) ,planner=DefaultSavePlanner() ,)
logger.info(f'''Model saved to {ckpt_dir}''' )
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 )-> List[str]:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
UpperCAmelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
SCREAMING_SNAKE_CASE_ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Loading model from {input_model_file}''' )
SCREAMING_SNAKE_CASE_ = torch.load(UpperCAmelCase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Loading model from {input_model_file}''' )
SCREAMING_SNAKE_CASE_ = torch.load(UpperCAmelCase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
os.path.join(UpperCAmelCase ,f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=UpperCAmelCase ,storage_reader=dist_cp.FileSystemReader(UpperCAmelCase ) ,planner=DefaultLoadPlanner() ,)
SCREAMING_SNAKE_CASE_ = state_dict['''model''']
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 )-> int:
'''simple docstring'''
os.makedirs(UpperCAmelCase ,exist_ok=UpperCAmelCase )
with FSDP.state_dict_type(
UpperCAmelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict(UpperCAmelCase ,UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
SCREAMING_SNAKE_CASE_ = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(UpperCAmelCase ,exist_ok=UpperCAmelCase )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} ,storage_writer=dist_cp.FileSystemWriter(UpperCAmelCase ) ,planner=DefaultSavePlanner() ,)
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 )-> Any:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
UpperCAmelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
SCREAMING_SNAKE_CASE_ = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
SCREAMING_SNAKE_CASE_ = torch.load(UpperCAmelCase )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
SCREAMING_SNAKE_CASE_ = (
os.path.join(UpperCAmelCase ,f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() ,optimizer_key='''optimizer''' ,storage_reader=dist_cp.FileSystemReader(UpperCAmelCase ) ,)
SCREAMING_SNAKE_CASE_ = optim_state['''optimizer''']
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict_to_load(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )
optimizer.load_state_dict(UpperCAmelCase )
| 393 | 1 |
"""simple docstring"""
import operator as op
__A : Optional[Any] = 'scaler.pt'
__A : Dict = 'pytorch_model'
__A : Optional[Any] = 'random_states'
__A : List[Any] = 'optimizer'
__A : Optional[int] = 'scheduler'
__A : Union[str, Any] = 'pytorch_model.bin'
__A : Union[str, Any] = 'pytorch_model.bin.index.json'
__A : Optional[Any] = 'model.safetensors'
__A : Dict = 'model.safetensors.index.json'
__A : Optional[Any] = '1.10.2'
__A : List[str] = 'py38'
__A : List[Any] = '4.17.0'
__A : str = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
__A : Optional[Any] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
__A : Optional[int] = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
__A : Optional[Any] = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
__A : List[str] = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
__A : Dict = '2.0.1'
__A : Dict = ['pdsh', 'standard', 'openmpi', 'mvapich']
__A : str = ['default', 'reduce-overhead', 'max-autotune']
__A : Tuple = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__A : Union[str, Any] = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
__A : int = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
__A : Dict = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
| 281 |
"""simple docstring"""
from collections import deque
def tarjan ( g ):
    """simple docstring"""
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index

    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v, 0, components )
    return components


def create_graph ( n, edges ):
    """simple docstring"""
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
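    # Additional hand-check: a single 2-cycle collapses into one component.
    assert tarjan(create_graph(2, [(0, 1), (1, 0)])) == [[1, 0]]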
| 281 | 1 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree :
    '''simple docstring'''

    def __init__( self , arr = None , size = None ) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError('Either arr or size must be specified' )

    def init( self , arr ) -> None:
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array( self ):
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_( index ) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev( index ) -> int:
        return index - (index & (-index))

    def add( self , index , value ) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )

    def update( self , index , value ) -> None:
        self.add(index , value - self.get(index ) )

    def prefix( self , right ) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result

    def query( self , left , right ) -> int:
        return self.prefix(right ) - self.prefix(left )

    def get( self , index ) -> int:
        return self.query(index , index + 1 )

    def rank_query( self , value ) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
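

# Usage sketch (hand-computed values): prefix sums over [1, 2, 3, 4, 5],
# a half-open range query, then a point update.
_demo = FenwickTree(arr=[1, 2, 3, 4, 5])
assert _demo.prefix(3) == 6      # 1 + 2 + 3
assert _demo.query(1, 4) == 9    # 2 + 3 + 4
_demo.add(2, 10)
assert _demo.get(2) == 13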
if __name__ == "__main__":
import doctest
doctest.testmod() | 91 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table ):
        """simple docstring"""
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation( self ):
        """simple docstring"""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def __available_resources( self ):
        """simple docstring"""
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def __need( self ):
        """simple docstring"""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def __need_index_manager( self ):
        """simple docstring"""
        return {self.__need().index(i ): i for i in self.__need()}

    def main( self , **kwargs ):
        """simple docstring"""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 5_0 + "\n" )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F'''Process {process_number + 1} is executing.''' )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print("The process is in a safe state.\n" )
            else:
                print("System in unsafe state. Aborting...\n" )
                break

    def __pretty_data( self ):
        """simple docstring"""
        print(" " * 9 + "Allocated Resource Table" )
        for item in self.__allocated_resources_table:
            print(
                F'''P{self.__allocated_resources_table.index(item ) + 1}'''
                + " ".join(F'''{it:>8}''' for it in item )
                + "\n" )
        print(" " * 9 + "System Resource Table" )
        for item in self.__maximum_claim_table:
            print(
                F'''P{self.__maximum_claim_table.index(item ) + 1}'''
                + " ".join(F'''{it:>8}''' for it in item )
                + "\n" )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x ) for x in self.__claim_vector ) )
        print(
            "Initial Available Resources: "
            + " ".join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
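    # Usage sketch: run the safety check on the tables defined above; this
    # prints each executing process and the updated free-resource vector.
    # BankersAlgorithm(
    #     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    # ).main(describe=True)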
| 566 | 0 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest ( SchedulerCommonTest ):
    """simple docstring"""

    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 10_00,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs )
        return config

    def test_timesteps( self ):
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_variance_type( self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_clip_sample_range( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        for time_step in [0, 5_00, 9_99]:
            for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log" )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_549_625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_994_987 ) ) < 1e-5

    def test_variance_learned_range( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_712_790 < 1e-5
        assert scheduler._get_variance(4_87 , predicted_variance=predicted_variance ) - -5.7_998_052 < 1e-5
        assert scheduler._get_variance(9_99 , predicted_variance=predicted_variance ) - -0.0_010_011 < 1e-5

    def test_full_loop( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
    def test_full_loop_skip_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
    def test_trained_betas( self )-> Optional[Any]:
        pass

    def test_add_noise_device( self )-> Optional[Any]:
        pass | 228 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class StableDiffusionControlNetImgaImgPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A ( self : int )-> List[Any]:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
__UpperCamelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
__UpperCamelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=A_ , set_alpha_to_one=A_ , )
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCamelCase = CLIPTextModel(A_ )
__UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCamelCase = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A ( self : str , A_ : Optional[int] , A_ : Optional[Any]=0 )-> List[Any]:
if str(A_ ).startswith("mps" ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = 2
__UpperCamelCase = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A_ , device=torch.device(A_ ) , )
__UpperCamelCase = floats_tensor(control_image.shape , rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCamelCase = Image.fromarray(np.uint8(A_ ) ).convert("RGB" ).resize((64, 64) )
__UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def A ( self : List[str] )-> List[Any]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A ( self : List[Any] )-> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def A ( self : Any )-> Dict:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def A ( self : Optional[Any] )-> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
__UpperCamelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(A_ )
torch.manual_seed(0 )
__UpperCamelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(A_ )
torch.manual_seed(0 )
__UpperCamelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=A_ , set_alpha_to_one=A_ , )
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCamelCase = CLIPTextModel(A_ )
__UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCamelCase = MultiControlNetModel([controlneta, controlneta] )
__UpperCamelCase = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A ( self : int , A_ : Dict , A_ : str=0 )-> Optional[Any]:
if str(A_ ).startswith("mps" ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = 2
__UpperCamelCase = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A_ , device=torch.device(A_ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A_ , device=torch.device(A_ ) , ),
]
__UpperCamelCase = floats_tensor(control_image[0].shape , rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCamelCase = Image.fromarray(np.uint8(A_ ) ).convert("RGB" ).resize((64, 64) )
__UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def A ( self : List[str] )-> List[str]:
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
pipe.to(A_ )
__UpperCamelCase = 10.0
__UpperCamelCase = 4
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = steps
__UpperCamelCase = scale
__UpperCamelCase = pipe(**A_ )[0]
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = steps
__UpperCamelCase = scale
__UpperCamelCase = pipe(**A_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = steps
__UpperCamelCase = scale
__UpperCamelCase = pipe(**A_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = steps
__UpperCamelCase = scale
__UpperCamelCase = pipe(**A_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def A ( self : Optional[int] )-> Any:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A ( self : List[Any] )-> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def A ( self : int )-> Optional[Any]:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def A ( self : List[str] )-> Tuple:
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(A_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Union[str, Any] )-> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : str )-> Optional[Any]:
__UpperCamelCase = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
__UpperCamelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=A_ , controlnet=A_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase = "evil space-punk bird"
__UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((5_12, 5_12) )
__UpperCamelCase = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((5_12, 5_12) )
__UpperCamelCase = pipe(
A_ , A_ , control_image=A_ , generator=A_ , output_type="np" , num_inference_steps=50 , strength=0.6 , )
__UpperCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
__UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9e-2 | 228 | 1 |
'''simple docstring'''
def catalan ( number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = F'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
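

# Hand-check against the first Catalan numbers (1, 1, 2, 5, 14):
assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]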
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 379 | 0 |
from math import factorial
def solution(number: int = 100) -> int:
    """Return the sum of the digits of number! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(number))))
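# Example: solution(10) == 27, since 10! == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.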
if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 716 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 64 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        # Resize so that the shortest edge of the image matches size["shortest_edge"].
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
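        # Note: the transform order above mirrors CLIP-style preprocessing:
        # optional RGB conversion -> shortest-edge resize -> center crop ->
        # rescale to [0, 1] -> normalize with the OPENAI_CLIP mean/std.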
| 533 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a dataset using the datasets package and save it to the format expected by finetune.py."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
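# Example CLI call via fire (the script filename is an assumption; arguments map onto
# the function's parameters in order):
#   python download_wmt.py ro en wmt16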
if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
| 716 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
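    # Example invocation (the script filename here is an assumption):
    #   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations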
| 502 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 135 | '''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
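        # Example: a 400x300 (w x h) image with size=288 scales by 288/300 to 384x288,
        # stays under the 1333/800 cap, and both sides are already multiples of
        # size_divisor=32, so the expected output is (288, 384).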
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):  # method name assumed; the source dump kept only an empty body here
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 309 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
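# For an 8-bit image, each channel value p maps to 255 - p, so pure black
# [0, 0, 0] becomes pure white [255, 255, 255] and vice versa.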
if __name__ == "__main__":
# read original image
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 707 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
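# Concrete commands subclass this and implement both hooks: register_subcommand()
# wires the command's argparse sub-parser, and run() executes it (a hypothetical
# EnvCommand, for instance, would print environment info when run).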
| 470 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant multiplier parsed from `step_rules`, written as
    "value:steps" pairs followed by the final multiplier, e.g. "1:10,0.1:20,0.01"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup, then cosine decay following `num_cycles` waves (0.5 = decrease to 0)."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the optimizer's initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
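# Typical usage (values here are illustrative): warm up for 500 steps, then decay
# linearly to zero over 10_000 training steps:
#   lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=500, num_training_steps=10_000)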
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    ) | 30 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_images = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_images, return_tensors=framework))
        return inputs | 30 | 1 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' column
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        first_set_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_set_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n linear equations given as n lists of n + 1 numbers
    (coefficients followed by the constant term)."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
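    # Each equation above is x_i plus the sum of all five unknowns; that sum works
    # out to 5, so the exact solution is [-1.0, 0.0, 1.0, 2.0, 3.0]. [[4, 2]] encodes
    # the single equation 4x = 2, i.e. [0.5].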
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]])) | 178 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once() | 178 | 1 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'Salesforce/codegen-350M-mono': 2_048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005, "
                "so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
| 350 |
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 350 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected) == sorted(result)
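    # Sanity check: the expected tree spans all 9 nodes with 8 edges and has
    # total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.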
| 708 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # `root` default added here so the one-argument call below works.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
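# `_download` mirrors openai/whisper's caching helper: a cached file is trusted only
# if the SHA-256 digest of its bytes matches the hash segment embedded in the URL.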
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # `_download` returns the raw checkpoint bytes, so wrap them for `torch.load`.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
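    # Example (the script filename here is an assumption; "tiny" resolves through _MODELS):
    #   python convert_openai_whisper_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny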
| 629 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight'))
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight'))
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias'))
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean'))
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var'))
# stages
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
))
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
))
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
))
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
))
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
))
# 3 convs
for i in range(3):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
))
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
))
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
))
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
))
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
))
# fmt: on
for i in range(config.encoder_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias'''))
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
))
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
])
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
# first: transformer encoder
for i in range(6):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6):
# read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''')
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
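# Illustrative sketch (not part of the original script): PyTorch's nn.MultiheadAttention
# stores the q, k and v projections fused row-wise in one (3*d, d) in_proj matrix; DETR-R50
# uses d = 256, which is why the slices above are [:256], [256:512] and [-256:]. The helper
# below shows the split in isolation; its names are illustrative assumptions.
def split_fused_in_proj(in_proj_weight, in_proj_bias, d=256):
    q = (in_proj_weight[:d, :], in_proj_bias[:d])
    k = (in_proj_weight[d : 2 * d, :], in_proj_bias[d : 2 * d])
    v = (in_proj_weight[-d:, :], in_proj_bias[-d:])
    return q, k, v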
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)
# load original model from torch hub
    model_name_to_original_name = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(f'''Converting model {model_name}...''')
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
# rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr')
and not key.startswith('class_labels_classifier')
and not key.startswith('bbox_predictor')
):
UpperCamelCase = state_dict.pop(a__)
UpperCamelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCamelCase = state_dict.pop(a__)
UpperCamelCase = val
elif key.startswith('bbox_attention') or key.startswith('mask_head'):
continue
else:
UpperCamelCase = state_dict.pop(a__)
UpperCamelCase = val
else:
if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
UpperCamelCase = state_dict.pop(a__)
UpperCamelCase = val
# finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-3)
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-3)
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4)
print('Looks ok!')
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...')
model.push_to_hub(f'''nielsr/{model_name}''')
processor.push_to_hub(f'''nielsr/{model_name}''')
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
lowerCAmelCase : str = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 3 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
'''simple docstring'''
    ignore_keys = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
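# Sketch (toy sizes; the names below are illustrative, not part of the conversion script):
# the helper above implements weight tying - the LM head reuses the embedding matrix, so
# logits = hidden_states @ emb.weight.T and no separate output matrix needs to be stored.
def _weight_tying_sketch():
    toy_emb = nn.Embedding(10, 4)  # (vocab_size, d_model)
    toy_head = nn.Linear(4, 10, bias=False)  # weight shape is (vocab_size, d_model)
    toy_head.weight = toy_emb.weight  # share a single parameter between input and output
    assert torch.equal(toy_head.weight, toy_emb.weight)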
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location="""cpu""")
    args = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    state_dict = mam_aaa["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
args = parser.parse_args()
model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 540 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1E-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFBlenderbotSmallForConditionalGeneration,
            '''feature-extraction''': TFBlenderbotSmallModel,
            '''summarization''': TFBlenderbotSmallForConditionalGeneration,
            '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
            '''translation''': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
    model_name = '''facebook/blenderbot_small-90M'''
@cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
@cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model
@slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 404 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """
    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
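    # Quick demonstration (illustrative): in two's complement, XOR of two ints is
    # negative exactly when their sign bits differ, which is what the check exploits.
    print(different_signs(1, -1))   # True
    print(different_signs(-5, -7))  # False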
| 404 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
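# Design note (sketch, not part of the original module): merge_lists re-sorts every value,
# which costs O((m + n) log(m + n)) in general. Because both inputs are already sorted,
# the standard library's heapq.merge can stream them in O(m + n); the constructor's
# sorted() call then stays linear too, since Timsort detects the already-sorted run.
def merge_lists_linear(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    import heapq

    return SortedLinkedList(heapq.merge(sll_one, sll_two))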
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 638 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
def _snake_case ( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Union[Split, str]):
if isinstance(UpperCAmelCase , UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :List[Any] = mode.value
SCREAMING_SNAKE_CASE_ :Optional[Any] = os.path.join(UpperCAmelCase , F"{mode}.txt")
SCREAMING_SNAKE_CASE_ :Tuple = 1
SCREAMING_SNAKE_CASE_ :str = []
with open(UpperCAmelCase , encoding="utf-8") as f:
SCREAMING_SNAKE_CASE_ :Tuple = []
SCREAMING_SNAKE_CASE_ :int = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=UpperCAmelCase , labels=UpperCAmelCase))
guid_index += 1
                    words = []
                    labels = []
else:
SCREAMING_SNAKE_CASE_ :int = line.split(" ")
words.append(splits[0])
if len(UpperCAmelCase) > 1:
labels.append(splits[self.label_idx].replace("\n" , ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=UpperCAmelCase , labels=UpperCAmelCase))
return examples
def _snake_case ( self : List[Any] , UpperCAmelCase : TextIO , UpperCAmelCase : TextIO , UpperCAmelCase : List):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0])
def _snake_case ( self : List[str] , UpperCAmelCase : str):
if path:
with open(UpperCAmelCase , "r") as f:
SCREAMING_SNAKE_CASE_ :Any = f.read().splitlines()
if "O" not in labels:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)
def _snake_case ( self : Union[str, Any] , UpperCAmelCase : str):
if path:
with open(UpperCAmelCase , "r") as f:
SCREAMING_SNAKE_CASE_ :Optional[int] = f.read().splitlines()
if "O" not in labels:
SCREAMING_SNAKE_CASE_ :Dict = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
for token in sentence:
words.append(token["form"])
labels.append(token["upos"])
                assert len(words) == len(labels)
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=UpperCAmelCase , labels=UpperCAmelCase))
guid_index += 1
return examples
def _snake_case ( self : List[Any] , UpperCAmelCase : TextIO , UpperCAmelCase : TextIO , UpperCAmelCase : List):
SCREAMING_SNAKE_CASE_ :List[str] = 0
for sentence in parse_incr(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :str = preds_list[example_id]
SCREAMING_SNAKE_CASE_ :List[Any] = ""
for token in sentence:
out += F"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
out += "\n"
writer.write(UpperCAmelCase)
example_id += 1
def _snake_case ( self : Tuple , UpperCAmelCase : str):
if path:
with open(UpperCAmelCase , "r") as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
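# Minimal sketch (hypothetical inline data, mirroring read_examples_from_file above):
# CoNLL-style files hold one "token label" pair per line and separate sentences with
# blank lines (or -DOCSTART- markers), which is exactly what the parsers above split on.
def _conll_parsing_sketch():
    raw = "EU B-ORG\nrejects O\n\nPeter B-PER\n"
    sentences, words, labels = [], [], []
    for line in raw.splitlines():
        if not line.strip():
            if words:
                sentences.append((words, labels))
                words, labels = [], []
            continue
        token, label = line.split(" ")
        words.append(token)
        labels.append(label)
    if words:
        sentences.append((words, labels))
    return sentences  # [(['EU', 'rejects'], ['B-ORG', 'O']), (['Peter'], ['B-PER'])]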
| 631 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = '''unispeech-sat'''
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs, ) -> List[Any]:
'''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
# ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
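# Sketch (uses the default strides above; purely illustrative): multiplying the conv
# strides gives the total downsampling factor from raw waveform samples to logit frames.
def _downsampling_ratio_sketch():
    conv_stride = (5, 2, 2, 2, 2, 2, 2)
    ratio = functools.reduce(operator.mul, conv_stride, 1)
    assert ratio == 320  # at 16 kHz audio this is one logit frame every 20 ms
    return ratio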
| 704 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__snake_case = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter
        def process_string(self, s):
            return list(s)
        def process_list(self, inp):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__snake_case = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
__snake_case = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
__snake_case = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
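# Worked sketch (simplified: plain character edit distance, without the space/sentence
# transforms applied above): CER = (S + D + I) / N over reference characters. For
# reference "abc" and prediction "axcd" there is 1 substitution and 1 insertion over
# N = 3, so CER = 2/3.
def _cer_sketch(reference, prediction):
    m, n = len(reference), len(prediction)
    dist = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dist[i][0] = i
    for j in range(n + 1):
        dist[0][j] = j
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            dist[i][j] = min(
                dist[i - 1][j] + 1,  # deletion
                dist[i][j - 1] + 1,  # insertion
                dist[i - 1][j - 1] + cost,  # substitution
            )
    return dist[m][n] / m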
| 117 | 0 |
def kth_permutation(k: int, n: int) -> list[int]:
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
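    # Quick check (illustrative): the permutations of 0..2 in lexicographic order are
    # 012, 021, 102, 120, 201, 210, so the k=4 (0-indexed) permutation is [2, 0, 1].
    print(kth_permutation(4, 3))  # [2, 0, 1]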
| 68 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, """handle_key""", [])
        handle += [key]
        setattr(func, """handle_key""", handle)
        return func
return decorator
def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, """handle_key""", [])
        handle += keys
        setattr(func, """handle_key""", handle)
        return func
return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, """key_handler"""):
            setattr(new_cls, """key_handler""", {})
        setattr(new_cls, """handle_input""", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, """handle_key""", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
return new_cls
@staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
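# Hedged usage sketch (hypothetical class; assumes KEYMAP contains "up", "down" and
# "newline" entries holding the key codes compared in handle_input above): @register
# rebuilds the class through the KeyHandler metaclass, so every method marked with a
# key code is dispatched when that key is pressed.
@register
class _DemoMenu:
    @mark(KEYMAP["up"])
    def move_up(self):
        return "up"

    @mark_multiple(KEYMAP["down"], KEYMAP["newline"])
    def move_down(self):
        return "down"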
| 68 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w)
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h)
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , '''image_mean''' ) )
self.assertTrue(hasattr(a__ , '''image_std''' ) )
self.assertTrue(hasattr(a__ , '''do_normalize''' ) )
self.assertTrue(hasattr(a__ , '''do_rescale''' ) )
self.assertTrue(hasattr(a__ , '''rescale_factor''' ) )
self.assertTrue(hasattr(a__ , '''do_resize''' ) )
self.assertTrue(hasattr(a__ , '''size''' ) )
self.assertTrue(hasattr(a__ , '''do_pad''' ) )
    def test_image_processor_from_dict_with_kwargs(self):
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , a__ )
__magic_name__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a__ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , a__ )
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(a__ , batched=a__ )
__magic_name__ = image_processing(a__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ = image_processing(a__ , return_tensors='''pt''' ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(a__ , batched=a__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ = image_processing(a__ , return_tensors='''pt''' ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(a__ , batched=a__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__magic_name__ = json.loads(f.read() )
__magic_name__ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
__magic_name__ = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
__magic_name__ = image_processing(images=a__ , annotations=a__ , return_tensors='''pt''' )
# verify pixel values
__magic_name__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , a__ )
__magic_name__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , a__ , atol=1E-4 ) )
# verify area
__magic_name__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , a__ ) )
# verify boxes
__magic_name__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , a__ )
__magic_name__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , a__ , atol=1E-3 ) )
# verify image_id
__magic_name__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , a__ ) )
# verify is_crowd
__magic_name__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , a__ ) )
# verify class_labels
__magic_name__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , a__ ) )
# verify orig_size
__magic_name__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , a__ ) )
# verify size
__magic_name__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , a__ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__magic_name__ = json.loads(f.read() )
__magic_name__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
__magic_name__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__magic_name__ = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
__magic_name__ = image_processing(images=a__ , annotations=a__ , masks_path=a__ , return_tensors='''pt''' )
# verify pixel values
__magic_name__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , a__ )
__magic_name__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , a__ , atol=1E-4 ) )
# verify area
__magic_name__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , a__ ) )
# verify boxes
__magic_name__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , a__ )
__magic_name__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , a__ , atol=1E-3 ) )
# verify image_id
__magic_name__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , a__ ) )
# verify is_crowd
__magic_name__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , a__ ) )
# verify class_labels
__magic_name__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , a__ ) )
# verify masks
__magic_name__ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , a__ )
# verify orig_size
__magic_name__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , a__ ) )
# verify size
__magic_name__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , a__ ) )
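# Sketch (illustrative values, not tied to the fixtures above): the `boxes` targets
# verified in these tests are normalized (center_x, center_y, width, height). Converting
# one COCO-style absolute (x_min, y_min, width, height) box for a given image size:
def _to_normalized_cxcywh(box, image_width, image_height):
    x_min, y_min, w, h = box
    return (
        (x_min + w / 2) / image_width,
        (y_min + h / 2) / image_height,
        w / image_width,
        h / image_height,
    )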
| 245 |
'''simple docstring'''
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """
    >>> all_unique([1, 2, 3])
    True
    """
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
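    # Illustrative checks: a set collapses duplicates, so comparing lengths
    # reveals whether every element is unique.
    print(all_unique([1, 2, 3]))        # True
    print(all_unique(["a", "b", "a"]))  # False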
| 245 | 1 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [2_8.3, 2_7.5_2],
"wmt16-en-de-dist-6-1": [2_7.4, 2_7.1_1],
"wmt16-en-de-12-1": [2_6.9, 2_5.7_5],
}
A : Optional[int] = f"""{src_lang}-{tgt_lang}"""
A : List[str] = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, '''README.md''')
    print(f"""Generating {path}""")
    with open(path, '''w''', encoding='''utf-8''') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / """allenai""" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 256 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
SCREAMING_SNAKE_CASE__ : List[str] = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
SCREAMING_SNAKE_CASE__ : Any = """zero2"""
SCREAMING_SNAKE_CASE__ : Dict = """zero3"""
SCREAMING_SNAKE_CASE__ : str = [ZEROa, ZEROa]
def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return F"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=False , )
@parameterized.expand(snake_case , name_func=snake_case )
def _snake_case ( self , snake_case , snake_case ) -> List[str]:
"""simple docstring"""
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
@require_torch_multi_gpu
@parameterized.expand(snake_case , name_func=snake_case )
def _snake_case ( self , snake_case , snake_case ) -> List[str]:
"""simple docstring"""
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
def _snake_case ( self , snake_case ) -> str:
"""simple docstring"""
pass
def _snake_case ( self , snake_case , snake_case , snake_case = 10 , snake_case = True , snake_case = True , snake_case = True , ) -> str:
"""simple docstring"""
a__ : Tuple = models[model]
a__ : int = self.run_trainer(
stage=snake_case , model_name=snake_case , eval_steps=snake_case , num_train_epochs=1 , distributed=snake_case , fpaa=snake_case , )
self.do_checks(snake_case )
return output_dir
    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
""".split()
        if fp16:
            args.extend(["--fp16"])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
return output_dir
    def get_launcher(self, distributed=False):
        # use at most 2 GPUs for the distributed runs, since the test datasets are tiny
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
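
# Illustration (hypothetical 2-GPU machine, not part of the original test): with
# distributed=True, get_launcher() returns ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"],
# so the assembled subprocess command looks roughly like:
#   deepspeed --num_nodes 1 --num_gpus 2 <examples>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json --fp16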
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_efficientformer': [
        'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientFormerConfig',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientformer'] = [
        'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientFormerForImageClassification',
        'EfficientFormerForImageClassificationWithTeacher',
        'EfficientFormerModel',
        'EfficientFormerPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_efficientformer'] = [
        'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFEfficientFormerForImageClassification',
        'TFEfficientFormerForImageClassificationWithTeacher',
        'TFEfficientFormerModel',
        'TFEfficientFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
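
# Note (illustrative, assumes the usual transformers package layout): with the lazy
# module installed in sys.modules, an import such as
#   from transformers.models.efficientformer import EfficientFormerConfig
# only triggers the heavy torch/TF imports when the attribute is actually accessed.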
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
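

# Worked check for n = 10 (Project Euler 6): sum of squares = 10*11*21/6 = 385,
# square of the sum = 55**2 = 3025, so the difference is 3025 - 385 = 2640.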
if __name__ == "__main__":
print(F"""{solution() = }""")
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
import collections
import gzip
import os
import urllib.request
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(UpperCamelCase , """Please use tf.data to implement this functionality.""" )
def snake_case (UpperCamelCase : Optional[int] ):
'''simple docstring'''
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=UpperCamelCase ) as bytestream:
lowerCamelCase__ = _readaa(UpperCamelCase )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
lowerCamelCase__ = _readaa(UpperCamelCase )
lowerCamelCase__ = _readaa(UpperCamelCase )
lowerCamelCase__ = _readaa(UpperCamelCase )
lowerCamelCase__ = bytestream.read(rows * cols * num_images )
lowerCamelCase__ = numpy.frombuffer(UpperCamelCase , dtype=numpy.uinta )
lowerCamelCase__ = data.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , 1 )
return data
@deprecated(UpperCamelCase , """Please use tf.one_hot on tensors.""" )
def snake_case (UpperCamelCase : Any , UpperCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ = labels_dense.shape[0]
lowerCamelCase__ = numpy.arange(UpperCamelCase ) * num_classes
lowerCamelCase__ = numpy.zeros((num_labels, num_classes) )
lowerCamelCase__ = 1
return labels_one_hot
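

# Illustration (not in the original module):
#   _dense_to_one_hot(numpy.array([1, 3]), 5)
# returns
#   [[0., 1., 0., 0., 0.],
#    [0., 0., 0., 1., 0.]]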
@deprecated(UpperCamelCase , """Please use tf.data to implement this functionality.""" )
def snake_case (UpperCamelCase : Optional[Any] , UpperCamelCase : Any=False , UpperCamelCase : Any=10 ):
'''simple docstring'''
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=UpperCamelCase ) as bytestream:
lowerCamelCase__ = _readaa(UpperCamelCase )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
lowerCamelCase__ = _readaa(UpperCamelCase )
lowerCamelCase__ = bytestream.read(UpperCamelCase )
lowerCamelCase__ = numpy.frombuffer(UpperCamelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(UpperCamelCase , UpperCamelCase )
return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(UpperCamelCase , """Please write your own downloading logic.""" )
def snake_case (UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[int] ):
'''simple docstring'''
if not gfile.Exists(UpperCamelCase ):
gfile.MakeDirs(UpperCamelCase )
lowerCamelCase__ = os.path.join(UpperCamelCase , UpperCamelCase )
if not gfile.Exists(UpperCamelCase ):
urllib.request.urlretrieve(UpperCamelCase , UpperCamelCase ) # noqa: S310
with gfile.GFile(UpperCamelCase ) as f:
lowerCamelCase__ = f.size()
print("""Successfully downloaded""" , UpperCamelCase , UpperCamelCase , """bytes.""" )
return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
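

# Minimal usage sketch (not part of the module; downloads from the CVDF mirror on
# first use, and the path below is illustrative):
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = mnist.train.next_batch(32)  # images: (32, 784) float32 in [0.0, 1.0]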
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
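

# Worked example (Project Euler 55): 349 reaches a palindrome in three iterations,
#   349 + 943 = 1292; 1292 + 2921 = 4213; 4213 + 3124 = 7337,
# while 196 is the best-known number suspected never to do so (a Lychrel candidate).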
if __name__ == "__main__":
print(f'''{solution() = }''')
"""
Project Euler Problem 26: https://projecteuler.net/problem=26

Find the value of d < 1000 for which 1/d contains the longest recurring cycle
in its decimal fraction part.
"""


def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
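

# With the defaults this solves Project Euler 26: among d < 1000, d = 983 yields the
# longest recurring cycle in the decimal expansion of 1/d (a cycle of 982 digits).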
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
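
    # Shape note (illustrative, using the defaults above): past_values is
    # (batch=13, context_length=14 + max(lags)=5) = (13, 19), future_values is (13, 7),
    # and the *_time_features tensors carry a trailing num_time_features=4 axis.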
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
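
    # Note (illustrative, not from the original script): -100 follows the standard
    # Hugging Face labels convention; Wav2Vec2ForCTC masks out -100 positions when
    # computing the CTC loss, so padded label positions do not contribute to it.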
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")
# Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])
    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names,
    )
    vocab_test = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names,
    )
    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers
    )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."

        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers,
    )
    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
# Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
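
# Example invocation (illustrative only; the dataset config, output path, and
# hyperparameters below are hypothetical):
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr --output_dir ./wav2vec2-xlsr-turkish \
#       --num_train_epochs 5 --per_device_train_batch_size 16 \
#       --do_train --do_eval --fp16 --freeze_feature_extractor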
"""simple docstring"""
_lowerCamelCase = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowerCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowerCamelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = 'base_with_context'
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
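

# Background note (not from the original script): Flax/T5X stores dense kernels as
# (in_features, out_features), while torch nn.Linear weights are (out_features,
# in_features); that is why every "kernel" above is transposed with .T before being
# wrapped in nn.Parameter, whereas embedding tables and layer-norm scales are not.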
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )

        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))

    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    # TaFilmDecoder corresponds to diffusers' T5FilmDecoder (name kept to match the import above).
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    # Caveat kept from the original script: argparse's `type=bool` treats any non-empty
    # string as True, so `--save False` still saves.
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
| 34 |
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
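    # Geometry note (added): for the ellipse 4x^2 + y^2 = 100, implicit differentiation
    # gives dy/dx = -4x/y, so the normal at (x, y) has gradient y/(4x). The outgoing
    # gradient below is the incoming gradient reflected about that normal, computed via
    # the double-angle identities sin(2t) = 2m/(1 + m^2) and cos(2t) = (1 - m^2)/(1 + m^2).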
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
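# Sanity check (added): the default entry point (1.4, -9.6) does lie on the ellipse,
# since 4 * 1.4**2 + (-9.6)**2 = 7.84 + 92.16 = 100.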
if __name__ == "__main__":
print(f"""{solution() = }""")
| 233 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
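# Minimal usage sketch (added; the dataset id is hypothetical):
#   from datasets import load_dataset
#   ds = load_dataset("user/some_audio_dataset", split="train")
#   ds = ds.prepare_for_task("audio-classification")  # consumes this task template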
| 410 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
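# (Added note) `_LazyModule` is installed in `sys.modules` in place of this module, so the
# heavy submodules listed in `_import_structure` are only imported on first attribute access.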
| 410 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak model's weights to our BiT structure."""
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
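# Example invocation (added; the local paths are hypothetical):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub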
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"
    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
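# Minimal usage sketch (added): instantiate the default configuration and a model from it.
#   from transformers import EfficientFormerModel
#   config = EfficientFormerConfig()
#   model = EfficientFormerModel(config)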
| 169 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 73 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator,
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
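# Minimal usage sketch (added; assumes an active SparkSession and a pyspark DataFrame `df`):
#   from datasets import Dataset
#   ds = Dataset.from_spark(df)  # Dataset.from_spark drives this builder internally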
| 73 | 1 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # "worflow_run_id" (sic) matches the parameter name defined in get_ci_error_statistics.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the artifacts downloaded from the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
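# Minimal usage sketch (added; the artifact name and token are hypothetical):
#   import os
#   reports = get_last_daily_ci_reports(
#       artifact_names=["run_models_gpu"], output_dir=".", token=os.environ["GH_TOKEN"]
#   )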
| 92 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
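# (Added note) The bottom-up version runs in O(target * n) time and O(target) space;
# the plain recursive version above is exponential without memoization.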
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 166 | 0 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment the count in the corresponding slot
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
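# (Added) e.g. check_anagrams("Silent", "Listen") -> True: each character's count
# cancels to zero exactly when the two strings are anagrams.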
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 714 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
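    # (Added note) `mel_fusion` stacks the globally interpolated mel with the three random
    # crops, giving shape (4, chunk_frames, 64): one "global" view plus front/middle/back views.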
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
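    # Minimal usage sketch (added; assumes 48 kHz mono audio as a 1-D numpy array `audio`):
    #   extractor = ClapFeatureExtractor()
    #   inputs = extractor(audio, sampling_rate=48_000, return_tensors="np")
    #   inputs["input_features"].shape  # (batch, 4, frames, 64) with truncation="fusion"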
    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        # dtype restored as float64 below (assumption, based on the surrounding dtype checks)
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
| 113 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None

        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 562 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 331 | 0 |
"""simple docstring"""
def _lowerCAmelCase(a : int ) -> list:
_SCREAMING_SNAKE_CASE =int(a )
if n_element < 1:
_SCREAMING_SNAKE_CASE =ValueError('''a should be a positive number''' )
raise my_error
_SCREAMING_SNAKE_CASE =[1]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(0, 0, 0)
_SCREAMING_SNAKE_CASE =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
UpperCAmelCase_ : Dict = hamming(int(n))
print('''-----------------------------------------------------''')
print(f"The list with nth numbers is: {hamming_numbers}")
print('''-----------------------------------------------------''')
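
# Alternative sketch (not part of the original script; names are illustrative):
# the same series can be generated with a min-heap, pushing 2x, 3x, 5x for each
# popped value and deduplicating with a set.
import heapq


def hamming_heapq(n_element: int) -> list:
    heap, seen, out = [1], {1}, []
    while len(out) < n_element:
        value = heapq.heappop(heap)
        out.append(value)
        for factor in (2, 3, 5):
            candidate = value * factor
            if candidate not in seen:
                seen.add(candidate)
                heapq.heappush(heap, candidate)
    return out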
| 700 |
"""simple docstring"""
import collections
import gzip
import os
import urllib.request
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
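
# For reference, an equivalent big-endian uint32 read with only the standard
# library (illustrative sketch, not used by this module):
#   import struct
#   struct.unpack(">I", bytestream.read(4))[0]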
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name)
            )
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
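
# Quick illustration (not part of the original module): labels [0, 2] with 3
# classes map to one-hot rows [1, 0, 0] and [0, 0, 1]:
#   _dense_to_one_hot(numpy.array([0, 2]), 3)
#   -> array([[1., 0., 0.],
#             [0., 0., 1.]])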
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name)
            )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """Container for a data set of images and labels (deprecated MNIST helper)."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2]
                )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
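
    # Worked example (illustrative): with 10 examples and batch_size=4, the third
    # call to next_batch crosses the epoch boundary, so it returns the 2 leftover
    # examples concatenated with the first 2 examples of the reshuffled next epoch.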
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it is already present locally."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as: tensorflow_datasets.load('mnist')"
)
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed
            )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
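
# Hypothetical usage sketch (names follow this module; the path is a placeholder):
#   datasets = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = datasets.train.next_batch(100)  # (100, 784) float32, (100, 10)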
| 165 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
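
# After conversion, the dump folder holds only the renamed weights file; loading it
# with, e.g., GPT2LMHeadModel.from_pretrained("./DialoGPT-small") additionally
# assumes a matching config.json is placed alongside (not done by this script).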
| 94 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
UpperCamelCase__ = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
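
# Hypothetical usage sketch showing the special-token layout produced above
# ([CLS] A [SEP] B [SEP]); the checkpoint name comes from the map in this file:
#   tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   tok.build_inputs_with_special_tokens([10, 11], [20])
#   -> [tok.cls_token_id, 10, 11, tok.sep_token_id, 20, tok.sep_token_id]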
| 619 | 0 |
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and download up to `max_images` results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
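
# Note on the double "unicode-escape" decode above: the scraped JSON escapes URLs
# twice (e.g. the six characters \\u003d decode first to \u003d and then to "="),
# so a single decoding pass would still leave literal escape sequences in the URL.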
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('''Please provide a search term.''')
raise
| 713 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
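
# Typical invocation (sketch; the script filename is an assumption):
#   python utils/check_tf_ops.py --saved_model_path ./saved_model --opset 12 --strict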
| 656 | 0 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
    main()
| 16 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 16 | 1 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask

        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 560 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    """
    BenchMarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**.
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
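
# Hypothetical usage sketch with the PyTorch subclass of these arguments (the
# subclass lives elsewhere in the library; treat the exact names as assumptions):
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32])
#   results = PyTorchBenchmark(args).run()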
| 560 | 1 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * solution = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return a polynomial that interpolates the data points (1, y0), (2, y1), ..."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating polynomial u as specified in the question."""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials of order 1..order."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 303 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
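
# Hypothetical usage sketch outside the test harness (checkpoint name taken from
# the tests above):
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]
#   # `audio` is a (channels, samples) numpy array.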
| 303 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
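

# Hedged usage sketch: the defaults above describe the checkpoint listed in
# TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP; `n_layer=6` is an
# arbitrary override for illustration.
#
#     config = TrajectoryTransformerConfig(n_layer=6)
#     assert config.hidden_size == config.n_embd  # resolved via attribute_map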
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> negative_image_emb = out.negative_image_embeds\n\n    >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n    >>> pipe.to("cuda")\n\n    >>> image = pipe(\n    ...     prompt,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=negative_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ... ).images\n\n    >>> image[0].save("cat.png")\n    ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
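

# Worked example for the rounding above (scale_factor=8): the size is measured
# in scale_factor**2 = 64 blocks, rounded up, then multiplied back by 8 to get
# the latent resolution handed to the movq decoder.
#     get_new_h_w(512, 512)  # -> (64, 64)
#     get_new_h_w(520, 520)  # -> (72, 72): 520 is not a multiple of 64, so it rounds up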
class KandinskyPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.1.
    """

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
def is_power_of_two(number: int) -> bool:
    """
    Return True if this non-negative integer is a power of two.

    Note the edge case: the bit trick below also accepts 0, which is not a
    mathematical power of two.

    >>> all(is_power_of_two(2**i) for i in range(10))
    True
    >>> is_power_of_two(10)
    False
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
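    # Spot check (added for illustration): note that 0 also passes the bit
    # trick in is_power_of_two, even though it is not a power of two.
    print([n for n in range(17) if is_power_of_two(n)])  # [0, 1, 2, 4, 8, 16]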
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

        assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
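
# Hedged CLI sketch (the script filename is assumed; paths are placeholders):
#     python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub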
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
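

# Hedged usage sketch (repo id and file path are made up for illustration):
# on recent huggingface_hub versions this defers straight to hfh.hf_hub_url, so
#     hf_hub_url("user/my_dataset", "data/train.parquet", revision="main")
# should resolve to
#     https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.parquet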
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
    main()
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
lowerCAmelCase = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None) | 174 | 1 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[str] = TypeVar("""T""")
class GraphAdjacencyList(Generic[T]):
    """
    Adjacency-list graph. ``directed=True`` (the default) builds a directed
    graph; ``directed=False`` builds an undirected one.
    """

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
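

if __name__ == "__main__":
    # Minimal demo (added for illustration, not part of the original module):
    # a small undirected graph built with the fluent `add_edge` API.
    graph = GraphAdjacencyList[int](directed=False)
    graph.add_edge(1, 2).add_edge(2, 3).add_edge(1, 3)
    print(graph)  # {1: [2, 3], 2: [1, 3], 3: [2, 1]}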
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
"""simple docstring"""
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
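    # Minimal demo (added for illustration; parameters are arbitrary): five
    # updates of a 20-cell road, one car every 4 cells, initial speed 1,
    # 10% random braking, max speed 5.
    for state in simulate(construct_highway(20, 4, 1), 5, 0.1, 5):
        print(state)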
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
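
# Hedged CLI sketch (the script filename and paths are placeholders):
#     python convert_wavlm_original_checkpoint.py \
#         --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base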
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate the polynomial through (x_points, y_points) at x0,
    using Neville's iterated interpolation. Returns the interpolated value and
    the full table of intermediate values.

    The sample points below lie on y = x + 5, so evaluating at x = 5 gives 10:
    >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
    10.0
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
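

# Hedged usage sketch (toy in-memory datasets, made up for illustration):
#
#     from datasets import Dataset
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#     both = concatenate_datasets([d1, d2])  # rows of d1 followed by rows of d2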
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
snake_case__ : List[Any] = {"""input_ids""": [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 717 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1_0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[Any] = parent
snake_case__ : List[Any] = batch_size
snake_case__ : str = image_size
snake_case__ : List[str] = patch_size
snake_case__ : str = num_channels
snake_case__ : Any = is_training
snake_case__ : Optional[int] = use_labels
snake_case__ : List[Any] = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : int = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Dict = hidden_act
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : Tuple = type_sequence_label_size
snake_case__ : int = initializer_range
snake_case__ : Tuple = scope
snake_case__ : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ : Dict = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
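# e.g. with the defaults above (image_size=3_0, patch_size=2): (30 // 2) ** 2 = 225 patches, so seq_length = 226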
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Tuple = None
if self.use_labels:
snake_case__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : List[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = ViTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Tuple = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = ViTForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case__ : str = 1
snake_case__ : Any = ViTForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = self.type_sequence_label_size
snake_case__ : Optional[int] = ViTForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : str = 1
snake_case__ : Tuple = ViTForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : Dict = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
config, pixel_values, labels = self.prepare_config_and_inputs()
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = ViTModelTester(self )
snake_case__ : Optional[Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def __UpperCamelCase ( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Union[str, Any] = ViTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.default_image_processor
snake_case__ : Optional[int] = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
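# at 480x480 input with 8x8 patches, interpolation gives (480 // 8) ** 2 + 1 = 3601 tokens,
# which matches the (1, 3601, 384) hidden-state shape asserted below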
snake_case__ : str = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_8_0 )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : Any = inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(__SCREAMING_SNAKE_CASE , interpolate_pos_encoding=__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : Any = torch.Size((1, 3_6_0_1, 3_8_4) )
self.assertEqual(outputs.last_hidden_state.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
snake_case__ : Tuple = self.default_image_processor
snake_case__ : Tuple = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : Tuple = inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE )
| 419 | 0 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'
class lowerCAmelCase__ ( TestCasePlus ):
'''simple docstring'''
def __snake_case ( self : Dict ):
super().setUp()
data_cached = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=True , )
self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __snake_case ( self : Optional[int] ):
MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
UpperCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
UpperCAmelCase = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
UpperCAmelCase = ['''finetune.py'''] + bash_script.split() + args
with patch.object(a__ , '''argv''' , a__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
UpperCAmelCase = SummarizationModule.add_model_specific_args(a__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = main(a__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics['''val'''][0]
UpperCAmelCase = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase = os.listdir(a__ )
UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
UpperCAmelCase = os.path.join(args.output_dir , a__ )
UpperCAmelCase = torch.load(a__ , map_location='''cpu''' )
UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
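# The GPU distillation test below repeats the same recipe as the finetuning test above (a recap, not new logic):
# 1. read the example bash script and keep only the part after the entry-point name
# 2. strip line continuations and substitute the $VAR placeholders via env_vars_to_replace
# 3. patch sys.argv with the rebuilt argument list and invoke the entry point in-process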
class lowerCAmelCase__ ( TestCasePlus ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __snake_case ( self : Any ):
UpperCAmelCase = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
UpperCAmelCase = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
UpperCAmelCase = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
UpperCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = bash_script.replace('''--fp16''' , '''''' )
UpperCAmelCase = 6
UpperCAmelCase = (
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(a__ , '''argv''' , a__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
UpperCAmelCase = SummarizationDistiller.add_model_specific_args(a__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCAmelCase = distill_main(a__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics['''val'''][0]
UpperCAmelCase = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase = os.listdir(a__ )
UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
UpperCAmelCase = os.path.join(args.output_dir , a__ )
UpperCAmelCase = torch.load(a__ , map_location='''cpu''' )
UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 51 |
'''simple docstring'''
import json
import sys
def format_json_to_md ( input_json_file : str , output_md_file : str ) -> None:
"""simple docstring"""
with open(input_json_file , encoding='utf-8' ) as f:
results = json.load(f )
output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(results ):
benchmark_res = results[benchmark_name]
benchmark_file_name = benchmark_name.split('/' )[-1]
output_md.append(f"### Benchmark: {benchmark_file_name}" )
title = '| metric |'
lines = '|--------|'
value = '| new / old (diff) |'
for metric_name in sorted(benchmark_res ):
metric_vals = benchmark_res[metric_name]
new_val = metric_vals['new']
old_val = metric_vals.get('old' , None )
dif_val = metric_vals.get('diff' , None )
val_str = f" {new_val:f}" if isinstance(new_val , (int, float) ) else 'None'
if old_val is not None:
val_str += f" / {old_val:f}" if isinstance(old_val , (int, float) ) else "None"
if dif_val is not None:
val_str += f" ({dif_val:f})" if isinstance(dif_val , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(output_md_file , 'w' , encoding='utf-8' ) as f:
f.writelines('\n'.join(output_md ) )
if __name__ == "__main__":
input_json_file = sys.argv[1]
output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
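# illustrative input/output (assumed shapes, derived from the code above):
# results = {"benchmarks/infer.json": {"latency_ms": {"new": 12.0, "old": 15.0, "diff": -3.0}}}
# renders a per-benchmark block such as:
# ### Benchmark: infer.json
# | metric | latency_ms |
# |--------|---|
# | new / old (diff) |  12.000000 / 15.000000 (-3.000000) |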
| 405 | 0 |
def molarity_to_normality ( nfactor : int , moles : float , volume : float ) -> float:
return round(float(moles / volume ) * nfactor )
def moles_to_pressure ( volume : float , moles : float , temperature : float ) -> float:
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def moles_to_volume ( pressure : float , moles : float , temperature : float ) -> float:
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature ( pressure : float , volume : float , moles : float ) -> float:
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
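# worked example (a sketch using the restored helper names above): all four helpers rearrange the
# ideal gas law PV = nRT with R = 0.0821 L*atm/(mol*K). For 2 mol at 300 K in a 10 L vessel,
# P = (2 * 0.0821 * 300) / 10 = 4.926 atm, which rounds to 5:
# moles_to_pressure(volume=10, moles=2, temperature=300) # -> 5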
| 515 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 515 | 1 |
from __future__ import annotations
def two_pointer ( nums : list[int] , target : int ) -> list[int]:
# assumes `nums` is sorted in ascending order
i = 0
j = len(nums ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
i = i + 1
else:
j = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
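# worked trace for the call above (illustrative; relies on the sorted-input assumption):
# i=0, j=3 -> 2 + 15 = 17 > 9, so j=2; 2 + 11 = 13 > 9, so j=1; 2 + 7 = 9 -> returns [0, 1]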
| 243 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Any =StableDiffusionDiffEditPipeline
UpperCAmelCase_ : Optional[int] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
UpperCAmelCase_ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
UpperCAmelCase_ : Dict =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase_ : Union[str, Any] =frozenset([] )
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , )
__snake_case : int = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
__snake_case : Optional[int] = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_zero=UpperCAmelCase , )
torch.manual_seed(0 )
__snake_case : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
__snake_case : int = CLIPTextModel(UpperCAmelCase )
__snake_case : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__snake_case : List[Any] = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = floats_tensor((1, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
__snake_case : Optional[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if str(UpperCAmelCase ).startswith("mps" ):
__snake_case : int = torch.manual_seed(UpperCAmelCase )
else:
__snake_case : str = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__snake_case : Tuple = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
__snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : Dict = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("RGB" )
if str(UpperCAmelCase ).startswith("mps" ):
__snake_case : str = torch.manual_seed(UpperCAmelCase )
else:
__snake_case : Dict = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__snake_case : Union[str, Any] = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
__snake_case : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : Union[str, Any] = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("RGB" )
if str(UpperCAmelCase ).startswith("mps" ):
__snake_case : Tuple = torch.manual_seed(UpperCAmelCase )
else:
__snake_case : Any = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__snake_case : Dict = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not hasattr(self.pipeline_class , "_optional_components" ):
return
__snake_case : str = self.get_dummy_components()
__snake_case : Union[str, Any] = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
__snake_case : Tuple = self.get_dummy_inputs(UpperCAmelCase )
__snake_case : List[str] = pipe(**UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase )
__snake_case : Any = self.pipeline_class.from_pretrained(UpperCAmelCase )
pipe_loaded.to(UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase , UpperCAmelCase ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
__snake_case : int = self.get_dummy_inputs(UpperCAmelCase )
__snake_case : List[Any] = pipe_loaded(**UpperCAmelCase )[0]
__snake_case : Any = np.abs(output - output_loaded ).max()
self.assertLess(UpperCAmelCase , 1E-4 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : int = "cpu"
__snake_case : str = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__snake_case : Dict = self.get_dummy_mask_inputs(UpperCAmelCase )
__snake_case : List[str] = pipe.generate_mask(**UpperCAmelCase )
__snake_case : str = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
__snake_case : Union[str, Any] = np.array([0] * 9 )
__snake_case : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : List[Any] = "cpu"
__snake_case : str = self.get_dummy_components()
__snake_case : str = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__snake_case : Optional[int] = self.get_dummy_inversion_inputs(UpperCAmelCase )
__snake_case : Dict = pipe.invert(**UpperCAmelCase ).images
__snake_case : List[str] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__snake_case : List[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
__snake_case : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Any = "cpu"
__snake_case : Tuple = self.get_dummy_components()
__snake_case : str = {"beta_start": 0.00_085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
__snake_case : List[Any] = DPMSolverMultistepScheduler(**UpperCAmelCase )
__snake_case : Optional[int] = DPMSolverMultistepInverseScheduler(**UpperCAmelCase )
__snake_case : Tuple = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__snake_case : Optional[int] = self.get_dummy_inversion_inputs(UpperCAmelCase )
__snake_case : Any = pipe.invert(**UpperCAmelCase ).images
__snake_case : Optional[int] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__snake_case : List[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
__snake_case : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase ( cls ) -> Optional[Any]:
'''simple docstring'''
__snake_case : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
__snake_case : Dict = raw_image.convert("RGB" ).resize((768, 768) )
__snake_case : int = raw_image
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case : Optional[int] = torch.manual_seed(0 )
__snake_case : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa )
__snake_case : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
__snake_case : List[Any] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__snake_case : Dict = "a bowl of fruit"
__snake_case : Any = "a bowl of pears"
__snake_case : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , )
__snake_case : Optional[int] = pipe.invert(
prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase ).latents
__snake_case : Union[str, Any] = pipe(
prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
__snake_case : List[Any] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
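# the three-step DiffEdit flow exercised above and below: generate_mask (contrast attention maps for
# the source vs. target prompt), invert (DDIM / DPM-Solver inversion of the image into latents),
# then the main call inpaints the masked region toward the target prompt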
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : Union[str, Any] = torch.manual_seed(0 )
__snake_case : Optional[int] = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa )
__snake_case : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__snake_case : Tuple = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__snake_case : List[str] = "a bowl of fruit"
__snake_case : Optional[int] = "a bowl of pears"
__snake_case : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , )
__snake_case : Any = pipe.invert(
prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase , num_inference_steps=25 , ).latents
__snake_case : Optional[int] = pipe(
prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
__snake_case : List[Any] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 243 | 1 |
def gnome_sort ( lst : list ):
'''simple docstring'''
if len(lst ) <= 1:
return lst
i = 1
while i < len(lst ):
if lst[i - 1] <= lst[i]:
i += 1
else:
lst[i - 1], lst[i] = lst[i], lst[i - 1]
i -= 1
if i == 0:
i = 1
return lst
if __name__ == "__main__":
snake_case = input("Enter numbers separated by a comma:\n").strip()
snake_case = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
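# worked trace (illustrative): gnome_sort([3, 1, 2])
# i=1: 3 > 1 -> swap -> [1, 3, 2], i -> 0 -> reset to 1
# i=1: 1 <= 3 -> i=2; 3 > 2 -> swap -> [1, 2, 3], i=1; 1 <= 2 -> i=2; 2 <= 3 -> i=3 -> done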
| 716 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( ProcessorMixin ):
A : List[Any] = ["image_processor", "tokenizer"]
A : Union[str, Any] = "BridgeTowerImageProcessor"
A : Union[str, Any] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : int , _lowerCAmelCase : Dict , _lowerCAmelCase : str ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : int = 0 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , **_lowerCAmelCase : Tuple , ):
__snake_case : Dict = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
# add pixel_values + pixel_mask
__snake_case : str = self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , do_normalize=_lowerCAmelCase , do_center_crop=_lowerCAmelCase , **_lowerCAmelCase )
encoding.update(_lowerCAmelCase )
return encoding
def snake_case__ ( self : List[Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : str ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Tuple , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Union[str, Any] ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def snake_case__ ( self : List[Any] ):
__snake_case : int = self.tokenizer.model_input_names
__snake_case : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
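# usage sketch (assumes the public "BridgeTower/bridgetower-base" checkpoint and the real
# BridgeTowerProcessor name from transformers; not part of this file):
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# encoding = processor(images=image, text="a photo", return_tensors="pt")
# -> input_ids / attention_mask from the tokenizer merged with pixel_values from the image processor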
| 390 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
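# with this layout, `import transformers.models.mbart` stays cheap: the heavy torch / tf / flax
# submodules are only imported when an attribute is first resolved through _LazyModule, e.g.
# from transformers.models.mbart import MBartConfig # triggers only the configuration import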
| 400 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __UpperCAmelCase ( TaskTemplate ):
"""simple docstring"""
_snake_case : str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
_snake_case : ClassVar[Features] = Features({'text': Value('string' )} )
_snake_case : ClassVar[Features] = Features({'summary': Value('string' )} )
_snake_case : str = "text"
_snake_case : str = "summary"
@property
def A ( self : Any )-> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 505 | 0 |
"""simple docstring"""
def present_value ( discount_rate : float , cash_flows : list[float] ) -> float:
'''simple docstring'''
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
present_value = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
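# worked example (a sketch, using the name restored above): cash flows [-100, 50, 60] at a 10% rate give
# -100 / 1.1**0 + 50 / 1.1**1 + 60 / 1.1**2 = -100 + 45.4545... + 49.5867... ≈ -4.96
# present_value(0.10, [-100, 50, 60]) # -> -4.96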
| 150 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser ( subparsers=None ) -> argparse.ArgumentParser:
'''simple docstring'''
if subparsers is not None:
parser = subparsers.add_parser("test" )
else:
parser = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=None , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=test_command )
return parser
def test_command ( args ) -> None:
'''simple docstring'''
script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
test_args = script_name
else:
test_args = F"""--config_file={args.config_file} {script_name}"""
cmd = ["accelerate-launch"] + test_args.split()
result = execute_subprocess_async(cmd , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def main ( ) -> None:
'''simple docstring'''
parser = test_command_parser()
args = parser.parse_args()
test_command(args )
if __name__ == "__main__":
main()
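# typical invocations (illustrative):
# accelerate test
# accelerate test --config_file path/to/config.yaml
# either way the command expands to `accelerate-launch .../test_utils/scripts/test_script.py` as built above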
| 150 | 1 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
@dataclass
class _snake_case :
lowerCAmelCase_ : Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCAmelCase_ : bool = field(
default=lowercase_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCAmelCase_ : bool = field(
default=False , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
lowerCAmelCase_ : Optional[int] = field(
default=None , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase_ : Optional[int] = field(
default=None , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowerCAmelCase_ : Optional[int] = field(
default=None , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class _snake_case :
lowerCAmelCase_ : str = field(
default=lowercase_ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase_ : str = field(
default=lowercase_ , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase_ , metadata={"help": "Train language if it is different from the evaluation language."} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCAmelCase_ : Optional[bool] = field(
default=lowercase_ , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
lowerCAmelCase_ : bool = field(
default=lowercase_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCAmelCase_ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase_ : bool = field(
default=False , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase_ : bool = field(
default=lowercase_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def UpperCamelCase_( ):
'''simple docstring'''
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , snake_case )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ = training_args.get_process_log_level()
logger.setLevel(snake_case )
datasets.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
snake_case_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
snake_case_ = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
snake_case_ = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = train_dataset.features["label"].names
if training_args.do_eval:
snake_case_ = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = eval_dataset.features["label"].names
if training_args.do_predict:
snake_case_ = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = predict_dataset.features["label"].names
# Labels
snake_case_ = len(snake_case )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case , idalabel={str(snake_case ): label for i, label in enumerate(snake_case )} , labelaid={label: i for i, label in enumerate(snake_case )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
snake_case_ = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case_ = False
def preprocess_function(snake_case : Dict ):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=snake_case , max_length=data_args.max_seq_length , truncation=snake_case , )
if training_args.do_train:
if data_args.max_train_samples is not None:
snake_case_ = min(len(snake_case ) , data_args.max_train_samples )
snake_case_ = train_dataset.select(range(snake_case ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
snake_case_ = train_dataset.map(
snake_case , batched=snake_case , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(snake_case ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
snake_case_ = min(len(snake_case ) , data_args.max_eval_samples )
snake_case_ = eval_dataset.select(range(snake_case ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
snake_case_ = eval_dataset.map(
snake_case , batched=snake_case , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
snake_case_ = min(len(snake_case ) , data_args.max_predict_samples )
snake_case_ = predict_dataset.select(range(snake_case ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
snake_case_ = predict_dataset.map(
snake_case , batched=snake_case , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
snake_case_ = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(snake_case : EvalPrediction ):
snake_case_ = p.predictions[0] if isinstance(p.predictions , snake_case ) else p.predictions
snake_case_ = np.argmax(snake_case , axis=1 )
return metric.compute(predictions=snake_case , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case_ = default_data_collator
elif training_args.fpaa:
snake_case_ = DataCollatorWithPadding(snake_case , pad_to_multiple_of=8 )
else:
snake_case_ = None
# Initialize our Trainer
snake_case_ = Trainer(
model=snake_case , args=snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=snake_case , tokenizer=snake_case , data_collator=snake_case , )
# Training
if training_args.do_train:
snake_case_ = None
if training_args.resume_from_checkpoint is not None:
snake_case_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ = last_checkpoint
snake_case_ = trainer.train(resume_from_checkpoint=snake_case )
snake_case_ = train_result.metrics
snake_case_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case )
)
snake_case_ = min(snake_case , len(snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , snake_case )
trainer.save_metrics("train" , snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case_ = trainer.evaluate(eval_dataset=snake_case )
snake_case_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case )
snake_case_ = min(snake_case , len(snake_case ) )
trainer.log_metrics("eval" , snake_case )
trainer.save_metrics("eval" , snake_case )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
snake_case_ , snake_case_ , snake_case_ = trainer.predict(snake_case , metric_key_prefix="predict" )
snake_case_ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(snake_case )
)
snake_case_ = min(snake_case , len(snake_case ) )
trainer.log_metrics("predict" , snake_case )
trainer.save_metrics("predict" , snake_case )
snake_case_ = np.argmax(snake_case , axis=1 )
snake_case_ = os.path.join(training_args.output_dir , "predictions.txt" )
if trainer.is_world_process_zero():
with open(snake_case , "w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(snake_case ):
                item = label_list[item]
writer.write(f'{index}\t{item}\n' )
if __name__ == "__main__":
main()
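# A minimal, de-obfuscated sketch of the metric hook defined above, assuming
# the standard `transformers`/`evaluate` APIs; `compute_metrics_example` and
# the metric handle are illustrative names, not part of the script itself.
import numpy as np
import evaluate
from transformers import EvalPrediction

xnli_metric = evaluate.load("xnli")  # reports accuracy for the XNLI task

def compute_metrics_example(p: EvalPrediction) -> dict:
    # `predictions` may be a (logits, ...) tuple; keep only the logits
    logits = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    preds = np.argmax(logits, axis=1)  # predicted class index per example
    return xnli_metric.compute(predictions=preds, references=p.label_ids)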
| 400 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_SCREAMING_SNAKE_CASE : Optional[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class _snake_case ( datasets.BuilderConfig ):
lowerCAmelCase_ : int = 1_0000
lowerCAmelCase_ : Optional[List[str]] = None
lowerCAmelCase_ : Optional[datasets.Features] = None
class _snake_case ( datasets.ArrowBasedBuilder ):
lowerCAmelCase_ : Tuple = ParquetConfig
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
snake_case_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
snake_case_ = data_files
if isinstance(a__ , a__ ):
snake_case_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
snake_case_ = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case_ = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
snake_case_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
snake_case_ = [dl_manager.iter_files(a__ ) for file in files]
        # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , "rb" ) as f:
snake_case_ = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"files": files} ) )
return splits
def lowerCAmelCase__ ( self , a__ ) -> pa.Table:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case_ = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def lowerCAmelCase__ ( self , a__ ) -> List[Any]:
'''simple docstring'''
snake_case_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , "rb" ) as f:
snake_case_ = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
snake_case_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'{file_idx}_{batch_idx}', self._cast_table(a__ )
except ValueError as e:
                logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
raise
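# A minimal sketch of the batched read loop above, assuming only `pyarrow`;
# the generator name and default batch size are illustrative.
import pyarrow as pa
import pyarrow.parquet as pq

def iter_parquet_tables(path: str, batch_size: int = 10_000):
    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size):
        # wrap each RecordBatch in a Table so consumers see a single type
        yield pa.Table.from_batches([record_batch])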
| 400 | 1 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
lowercase__ =[
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
lowercase__ =[
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def __UpperCamelCase ( ):
__a : Tuple = calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , bootstrap_aggregation=lowerCAmelCase__ , rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
__a : Dict = calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , bootstrap_aggregation=lowerCAmelCase__ , rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def __UpperCamelCase ( ):
__a : List[str] = '''rougeLsum'''
__a : Optional[int] = calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , newline_sep=lowerCAmelCase__ , rouge_keys=[k] )[k]
__a : Optional[Any] = calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , newline_sep=lowerCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def __UpperCamelCase ( ):
__a : int = ['''rouge1''', '''rouge2''', '''rougeL''']
__a : Optional[int] = calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , newline_sep=lowerCAmelCase__ , rouge_keys=lowerCAmelCase__ )
__a : Union[str, Any] = calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , newline_sep=lowerCAmelCase__ , rouge_keys=lowerCAmelCase__ )
assert score_sep == score_no_sep
def __UpperCamelCase ( ):
__a : Union[str, Any] = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
__a : Optional[Any] = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , newline_sep=lowerCAmelCase__ ) == calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , newline_sep=lowerCAmelCase__ )
def __UpperCamelCase ( ):
__a : int = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
__a : List[str] = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
__a : str = calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , rouge_keys=['''rougeLsum'''] , newline_sep=lowerCAmelCase__ )['''rougeLsum''']
__a : Union[str, Any] = calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def __UpperCamelCase ( ):
__a : Dict = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
__a : Dict = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
__a : Optional[int] = calculate_rouge_path(
data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
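# A hedged sketch of the rougeLsum newline sensitivity the tests above
# exercise, using Google's `rouge_score` package directly (an assumption:
# the local `calculate_rouge` helper wraps this library).
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
reference = "The cat sat on the mat. The dog barked loudly."
prediction = "The cat sat on the mat. The dog barked loudly."
# rougeLsum is computed per line, so putting one sentence per line can
# change the score even for otherwise identical text
flat_score = scorer.score(reference, prediction)["rougeLsum"].fmeasure
split_score = scorer.score(
    reference.replace(". ", ".\n"), prediction.replace(". ", ".\n")
)["rougeLsum"].fmeasure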
| 326 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
def __init__(self : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[int]=7 , snake_case_ : List[str]=3 , snake_case_ : List[str]=3_0 , snake_case_ : Union[str, Any]=4_0_0 , snake_case_ : Optional[Any]=True , snake_case_ : Tuple=None , snake_case_ : List[Any]=True , snake_case_ : Tuple=[0.5, 0.5, 0.5] , snake_case_ : Optional[int]=[0.5, 0.5, 0.5] , snake_case_ : Dict=True , snake_case_ : Any=1 / 2_5_5 , snake_case_ : Any=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a : Optional[Any] = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
__a : List[Any] = parent
__a : Optional[Any] = batch_size
__a : int = num_channels
__a : Any = min_resolution
__a : Optional[Any] = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size
__a : Dict = do_normalize
__a : Any = image_mean
__a : Tuple = image_std
__a : Union[str, Any] = do_rescale
__a : Union[str, Any] = rescale_factor
__a : List[Any] = do_pad
def lowerCAmelCase (self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase (self : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any]=False ):
if not batched:
__a : str = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__a , __a : Tuple = image.size
else:
__a , __a : Tuple = image.shape[1], image.shape[2]
if w < h:
__a : int = int(self.size['''shortest_edge'''] * h / w )
__a : Any = self.size['''shortest_edge''']
elif w > h:
__a : Tuple = self.size['''shortest_edge''']
__a : int = int(self.size['''shortest_edge'''] * w / h )
else:
__a : List[Any] = self.size['''shortest_edge''']
__a : Dict = self.size['''shortest_edge''']
else:
__a : Union[str, Any] = []
for image in image_inputs:
__a , __a : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : Union[str, Any] = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__a : Any = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase__ ( __lowercase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = YolosImageProcessor if is_vision_available() else None
def lowerCAmelCase (self : Any ):
__a : Any = YolosImageProcessingTester(self )
@property
def lowerCAmelCase (self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase (self : Optional[int] ):
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , '''image_mean''' ) )
self.assertTrue(hasattr(snake_case_ , '''image_std''' ) )
self.assertTrue(hasattr(snake_case_ , '''do_normalize''' ) )
self.assertTrue(hasattr(snake_case_ , '''do_resize''' ) )
self.assertTrue(hasattr(snake_case_ , '''size''' ) )
def lowerCAmelCase (self : Union[str, Any] ):
__a : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , snake_case_ )
__a : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=snake_case_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , snake_case_ )
def lowerCAmelCase (self : str ):
pass
def lowerCAmelCase (self : Optional[Any] ):
# Initialize image_processing
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__a : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a : int = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : List[str] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__a : str = image_processing(snake_case_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase (self : str ):
# Initialize image_processing
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__a : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a : Dict = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : int = image_processing(snake_case_ , return_tensors='''pt''' ).pixel_values
__a , __a : List[str] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase (self : Optional[Any] ):
# Initialize image_processing
__a : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__a : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a : Optional[Any] = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : Tuple = image_processing(snake_case_ , return_tensors='''pt''' ).pixel_values
__a , __a : Union[str, Any] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase (self : Any ):
        # Initialize two image_processing instances
__a : Any = self.image_processing_class(**self.image_processor_dict )
__a : str = self.image_processing_class(do_resize=snake_case_ , do_normalize=snake_case_ , do_rescale=snake_case_ )
# create random PyTorch tensors
__a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
        # Test that the "pad" method and calling the image processor return the same tensors
__a : List[Any] = image_processing_a.pad(snake_case_ , return_tensors='''pt''' )
__a : Union[str, Any] = image_processing_a(snake_case_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def lowerCAmelCase (self : List[str] ):
# prepare image and target
__a : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__a : str = json.loads(f.read() )
__a : Dict = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
__a : Optional[Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
__a : Tuple = image_processing(images=snake_case_ , annotations=snake_case_ , return_tensors='''pt''' )
# verify pixel values
__a : int = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , snake_case_ )
__a : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) )
# verify area
__a : int = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , snake_case_ ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , snake_case_ )
__a : List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , snake_case_ , atol=1E-3 ) )
# verify image_id
__a : Optional[Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , snake_case_ ) )
# verify is_crowd
__a : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , snake_case_ ) )
# verify class_labels
__a : Optional[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , snake_case_ ) )
# verify orig_size
__a : Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , snake_case_ ) )
# verify size
__a : Optional[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , snake_case_ ) )
@slow
def lowerCAmelCase (self : Optional[int] ):
# prepare image, target and masks_path
__a : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__a : int = json.loads(f.read() )
__a : Optional[int] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
__a : List[str] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__a : Any = YolosImageProcessor(format='''coco_panoptic''' )
__a : Any = image_processing(images=snake_case_ , annotations=snake_case_ , masks_path=snake_case_ , return_tensors='''pt''' )
# verify pixel values
__a : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , snake_case_ )
__a : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , snake_case_ ) )
# verify boxes
__a : int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , snake_case_ )
__a : Tuple = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , snake_case_ , atol=1E-3 ) )
# verify image_id
__a : Dict = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , snake_case_ ) )
# verify is_crowd
__a : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , snake_case_ ) )
# verify class_labels
__a : Any = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , snake_case_ ) )
# verify masks
__a : Tuple = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , snake_case_ )
# verify orig_size
__a : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , snake_case_ ) )
# verify size
__a : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , snake_case_ ) )
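# A standalone sketch of the shortest-edge resize rule the helper above
# checks: scale the image so its short side equals `shortest_edge` while
# keeping the aspect ratio; returns (height, width). The function name is
# illustrative.
def expected_resize(height: int, width: int, shortest_edge: int) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge  # already square

assert expected_resize(400, 200, 18) == (36, 18)
assert expected_resize(200, 400, 18) == (18, 36)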
| 326 | 1 |
'''simple docstring'''
def perfect ( _SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
_lowerCamelCase = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 71 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
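# A minimal sketch of the lazy-import idea behind `_LazyModule`, using
# module-level __getattr__ (PEP 562); the attribute map here is illustrative
# and much smaller than the real _import_structure above.
import importlib

_LAZY_ATTRS = {"BigBirdPegasusConfig": ".configuration_bigbird_pegasus"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)  # the import happens only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")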
| 374 | 0 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowerCamelCase = logging.getLogger(__name__)
class _UpperCamelCase( SCREAMING_SNAKE_CASE ):
__A: List[Any] = """sequence-classification"""
def __init__( self : List[Any] , _lowerCamelCase : Any ):
'''simple docstring'''
if type(_lowerCamelCase ) == dict:
_UpperCAmelCase : int = Namespace(**_lowerCamelCase )
_UpperCAmelCase : Tuple = glue_output_modes[hparams.task]
_UpperCAmelCase : List[str] = glue_tasks_num_labels[hparams.task]
super().__init__(_lowerCamelCase , _lowerCamelCase , self.mode )
def a__ ( self : Optional[int] , **_lowerCamelCase : str ):
'''simple docstring'''
return self.model(**_lowerCamelCase )
def a__ ( self : str , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
_UpperCAmelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
_UpperCAmelCase : int = self(**_lowerCamelCase )
_UpperCAmelCase : str = outputs[0]
_UpperCAmelCase : List[str] = self.trainer.lr_schedulers[0]["scheduler"]
_UpperCAmelCase : List[str] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def a__ ( self : List[Any] ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.hparams
_UpperCAmelCase : List[Any] = processors[args.task]()
_UpperCAmelCase : str = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCAmelCase : Optional[int] = self._feature_file(_lowerCamelCase )
if os.path.exists(_lowerCamelCase ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , _lowerCamelCase )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
_UpperCAmelCase : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
_UpperCAmelCase : Any = convert_examples_to_features(
_lowerCamelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
def a__ ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = "dev" if mode == "test" else mode
_UpperCAmelCase : Optional[Any] = self._feature_file(_lowerCamelCase )
logger.info("Loading features from cached file %s" , _lowerCamelCase )
_UpperCAmelCase : List[str] = torch.load(_lowerCamelCase )
_UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_UpperCAmelCase : Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_UpperCAmelCase : str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Optional[Any] = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Tuple = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , batch_size=_lowerCamelCase , shuffle=_lowerCamelCase , )
def a__ ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : int ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Optional[Any] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
_UpperCAmelCase : Tuple = self(**_lowerCamelCase )
_UpperCAmelCase : List[Any] = outputs[:2]
_UpperCAmelCase : str = logits.detach().cpu().numpy()
_UpperCAmelCase : List[str] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def a__ ( self : Dict , _lowerCamelCase : Any ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
_UpperCAmelCase : Dict = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Dict = np.argmax(_lowerCamelCase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Dict = np.squeeze(_lowerCamelCase )
_UpperCAmelCase : Tuple = np.concatenate([x["target"] for x in outputs] , axis=0 )
_UpperCAmelCase : Dict = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[Any] = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , _lowerCamelCase , _lowerCamelCase )}
_UpperCAmelCase : List[Any] = dict(results.items() )
_UpperCAmelCase : Any = results
return ret, preds_list, out_label_list
def a__ ( self : int , _lowerCamelCase : list ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self._eval_end(_lowerCamelCase )
_UpperCAmelCase : int = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def a__ ( self : int , _lowerCamelCase : Tuple ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self._eval_end(_lowerCamelCase )
_UpperCAmelCase : str = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def a__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(_lowerCamelCase , _lowerCamelCase )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=_lowerCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=_lowerCamelCase , required=_lowerCamelCase , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=_lowerCamelCase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def lowerCAmelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = argparse.ArgumentParser()
add_generic_args(_SCREAMING_SNAKE_CASE , os.getcwd() )
_UpperCAmelCase : int = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE , os.getcwd() )
_UpperCAmelCase : Dict = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_UpperCAmelCase : str = os.path.join(
"./results" , F"""{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}""" , )
os.makedirs(args.output_dir )
_UpperCAmelCase : Dict = GLUETransformer(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Tuple = generic_train(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_UpperCAmelCase : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
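# A sketch of the feature-caching pattern used in the Lightning module above:
# tokenize once, persist with torch.save, and reload on later runs. The
# helper name and signature are illustrative.
import os
import torch

def load_or_build_features(cache_path: str, build_fn):
    if os.path.exists(cache_path):
        return torch.load(cache_path)  # reuse previously computed features
    features = build_fn()
    torch.save(features, cache_path)
    return features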
| 706 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class _UpperCamelCase( unittest.TestCase ):
__A: int = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def a__ ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
_UpperCAmelCase : str = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
_UpperCAmelCase : Any = VideoClassificationPipeline(model=_lowerCamelCase , image_processor=_lowerCamelCase , top_k=2 )
_UpperCAmelCase : Union[str, Any] = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def a__ ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Any ):
for example in examples:
_UpperCAmelCase : str = video_classifier(_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
{"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
] , )
@require_torch
def a__ ( self : int ):
_UpperCAmelCase : Union[str, Any] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
_UpperCAmelCase : int = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
_UpperCAmelCase : Tuple = pipeline(
"video-classification" , model=_lowerCamelCase , feature_extractor=_lowerCamelCase , frame_sampling_rate=4 )
_UpperCAmelCase : Tuple = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
_UpperCAmelCase : List[Any] = video_classifier(_lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}] , )
_UpperCAmelCase : Optional[Any] = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
[{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}],
[{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}],
] , )
@require_tf
def a__ ( self : str ):
pass
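# A hedged usage sketch of the pipeline exercised above; the checkpoint is
# the tiny test model referenced in the tests, the video path is
# illustrative, and running this downloads weights and needs a video
# backend such as decord.
from transformers import pipeline

video_classifier = pipeline(
    "video-classification",
    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
)
# returns a list of {"score": float, "label": str} dicts, best score first
predictions = video_classifier("archery.mp4", top_k=2)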
| 328 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case : List[str] =logging.get_logger(__name__)
class lowerCamelCase__ ( a_):
'''simple docstring'''
def __init__(self ,*__lowerCamelCase ,**__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' ,a_ ,)
super().__init__(*a_ ,**a_ )
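# A generic sketch of the deprecation-shim pattern above: subclass the
# replacement class and emit a warning on construction; both class names
# here are illustrative.
import warnings

class NewProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)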
| 647 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> str:
a_ : Tuple = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ )
a_ : Any = downstream_dict["projector.weight"]
a_ : Dict = downstream_dict["projector.bias"]
a_ : Tuple = downstream_dict["model.post_net.linear.weight"]
a_ : int = downstream_dict["model.post_net.linear.bias"]
return model
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> List[Any]:
a_ : List[str] = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ )
a_ : List[str] = downstream_dict["model.linear.weight"]
a_ : List[Any] = downstream_dict["model.linear.bias"]
return model
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
a_ : int = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ )
a_ : Any = downstream_dict["connector.weight"]
a_ : Tuple = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ : List[str] = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ : int = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ : Any = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ : Union[str, Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ : str = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ : Union[str, Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ : List[str] = downstream_dict["objective.W"]
return model
@torch.no_grad()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Tuple:
a_ : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__, map_location="cpu" )
a_ : List[str] = checkpoint["Downstream"]
a_ : Union[str, Any] = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
SCREAMING_SNAKE_CASE__, return_attention_mask=SCREAMING_SNAKE_CASE__, do_normalize=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ : int = convert_classification(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ : Any = convert_diarization(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
elif arch.endswith("ForXVector" ):
a_ : Any = convert_xvector(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ : Tuple = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
SCREAMING_SNAKE_CASE_ = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 237 | 0 |
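# A minimal sketch of the weight-transplant pattern used by the converters
# in the file above: read tensors out of a checkpoint's state dict and
# assign them onto the matching module parameters; the key prefix is
# illustrative.
import torch

def copy_linear(dst: torch.nn.Linear, src_state: dict, prefix: str) -> None:
    dst.weight.data = src_state[f"{prefix}.weight"]
    dst.bias.data = src_state[f"{prefix}.bias"]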
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class UpperCamelCase_ (__A ):
def __init__( self : Dict , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Union[str, Any] ) -> Any:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = self.values[key]
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
return (
sum(self.charge_factor - len(lowerCAmelCase_ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple=None ) -> Tuple:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(lowerCAmelCase_ ) == 0
):
return key
return super()._collision_resolution(lowerCAmelCase_ , lowerCAmelCase_ )
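# A self-contained sketch of the separate-chaining idea above, without the
# project-local HashTable base class; the bucket count and method names are
# illustrative.
from collections import deque

class ChainedTable:
    def __init__(self, size_table: int = 8):
        self.size_table = size_table
        self.values = [None] * size_table  # each slot lazily becomes a deque

    def insert(self, key: int, value) -> None:
        slot = key % self.size_table
        if self.values[slot] is None:
            self.values[slot] = deque()
        self.values[slot].appendleft(value)  # newest value first, as above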
| 712 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = '''▁'''
lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
lowerCamelCase_ = {
'''facebook/xglm-564M''': 2048,
}
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any]="<s>" , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : Tuple="</s>" , lowerCAmelCase_ : List[str]="<s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : List[str] , ) -> None:
UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCAmelCase_ : Any = 7
UpperCAmelCase_ : Dict = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCAmelCase_ : Optional[int] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
UpperCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_ : int = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase_ : Tuple = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
UpperCAmelCase_ : List[str] = len(self.sp_model )
UpperCAmelCase_ : List[Any] = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.__dict__.copy()
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , lowerCAmelCase_ : Any ) -> Any:
UpperCAmelCase_ : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ ))
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ ))
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : str ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_ : Optional[int] = self.sp_model.PieceToId(lowerCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Tuple ) -> Optional[int]:
UpperCAmelCase_ : Any = "".join(lowerCAmelCase_ ).replace(lowerCAmelCase_ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : List[Any] = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , "wb" ) as fi:
UpperCAmelCase_ : str = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
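# A tiny sketch of the fairseq/spm id alignment documented above: the four
# special tokens keep fixed ids, and every other sentencepiece id is shifted
# by the offset; `spm_piece_to_id` stands in for sp_model.PieceToId.
FAIRSEQ_OFFSET = 1
FAIRSEQ_SPECIAL = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

def to_fairseq_id(token: str, spm_piece_to_id) -> int:
    if token in FAIRSEQ_SPECIAL:
        return FAIRSEQ_SPECIAL[token]
    spm_id = spm_piece_to_id(token)
    # spm id 0 is <unk>, so map it to the fairseq unk id instead of shifting
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_SPECIAL["<unk>"]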
| 463 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[str] ={
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __A ( UpperCamelCase__ ):
a__ : Optional[Any] = """sew-d"""
def __init__(self : Union[str, Any] , __a : int=32 , __a : int=768 , __a : int=12 , __a : str=12 , __a : List[Any]=3072 , __a : Optional[Any]=2 , __a : Optional[Any]=512 , __a : Tuple=256 , __a : Optional[int]=True , __a : int=True , __a : Dict=("p2c", "c2p") , __a : Dict="layer_norm" , __a : Tuple="gelu_python" , __a : List[str]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.0 , __a : Any=0.1 , __a : Any=0.02 , __a : int=1E-7 , __a : Tuple=1E-5 , __a : Union[str, Any]="group" , __a : str="gelu" , __a : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __a : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a : int=False , __a : List[Any]=128 , __a : List[Any]=16 , __a : Optional[Any]=True , __a : Dict=0.05 , __a : List[Any]=10 , __a : Any=2 , __a : Tuple=0.0 , __a : Dict=10 , __a : Tuple=0 , __a : Optional[Any]="mean" , __a : Optional[int]=False , __a : Dict=False , __a : Any=256 , __a : str=0 , __a : Optional[Any]=1 , __a : Any=2 , **__a : Any , ):
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = feat_extract_norm
UpperCAmelCase_ = feat_extract_activation
UpperCAmelCase_ = list(__a )
UpperCAmelCase_ = list(__a )
UpperCAmelCase_ = list(__a )
UpperCAmelCase_ = conv_bias
UpperCAmelCase_ = num_conv_pos_embeddings
UpperCAmelCase_ = num_conv_pos_embedding_groups
UpperCAmelCase_ = len(self.conv_dim )
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = squeeze_factor
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = position_buckets
UpperCAmelCase_ = share_att_key
UpperCAmelCase_ = relative_attention
UpperCAmelCase_ = norm_rel_ebd
UpperCAmelCase_ = list(__a )
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = feat_proj_dropout
UpperCAmelCase_ = final_dropout
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = feature_layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ = apply_spec_augment
UpperCAmelCase_ = mask_time_prob
UpperCAmelCase_ = mask_time_length
UpperCAmelCase_ = mask_time_min_masks
UpperCAmelCase_ = mask_feature_prob
UpperCAmelCase_ = mask_feature_length
UpperCAmelCase_ = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ = ctc_loss_reduction
UpperCAmelCase_ = ctc_zero_infinity
# sequence classification
UpperCAmelCase_ = use_weighted_layer_sum
UpperCAmelCase_ = classifier_proj_size
@property
def _lowercase (self : str ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
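# A short sketch of the reduce() in the property above: the product of the
# conv strides is the model's total temporal downsampling factor.
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # SEW-D default above
assert functools.reduce(operator.mul, conv_stride, 1) == 320  # 5 * 2**6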
| 78 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_lengths
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = gelu_activation
SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings
SCREAMING_SNAKE_CASE__ = causal
SCREAMING_SNAKE_CASE__ = asm
SCREAMING_SNAKE_CASE__ = n_langs
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = n_special
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = summary_type
SCREAMING_SNAKE_CASE__ = use_proj
SCREAMING_SNAKE_CASE__ = scope
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float()
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 6 | 0 |
class EditDistance:
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 37 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
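
# Hypothetical usage sketch (not part of the original file; the corpus and the
# vocab size below are made up). Training needs the `tokenizers` package, so
# the call is left commented out:
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train_from_iterator(
#         iter(["hello world", "hello tokenizers"]), vocab_size=30, show_progress=False
#     )
#     print(tokenizer.encode("hello world").tokens)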
| 37 | 1 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
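
# Hypothetical demo (not part of the original file): a two-state chain whose
# outgoing probabilities sum to 1 per node; over many steps the visit counts
# should roughly follow the stationary distribution. The output is random, so
# the call is left commented out:
#
#     chain = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#     print(get_transitions("a", chain, 1000))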
| 462 |
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    # subset[i][j] is True if a subset of the first i elements can sum to j;
    # initially no subsets can be formed, hence False
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
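
# Hypothetical sanity checks (not part of the original file): 14 = 2 + 4 + 8
# is reachable, while 5 is odd and every array element is even.
assert is_sum_subset([2, 4, 6, 8], 14) is True
assert is_sum_subset([2, 4, 6, 8], 5) is False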
| 597 | 0 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(lambda x: round(x))

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
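
# Hypothetical round-trip check (not part of the original file): the 2x2 key
# below has determinant 7, which is coprime with 36, so it is a valid key.
# Kept commented so importing the module stays side-effect free:
#
#     hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     assert hc.decrypt(hc.encrypt("HELLO")) == hc.process_text("HELLO")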
| 700 |
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
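
# Hypothetical sanity check (not part of the original file): the sequence
# starts 2, 3, 7, 43, 1807 and satisfies a(n) = a(n-1)^2 - a(n-1) + 1, which
# is exactly the `lower * upper + 1` step above.
assert [sylvester(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]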
| 167 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
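
# Hypothetical usage sketch (not part of the original file; the constructor
# arguments and the expected shape are assumptions). Featurizing one second of
# silent mono audio at 24 kHz:
#
#     extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
#     audio = np.zeros(24000, dtype=np.float32)
#     batch = extractor(audio, sampling_rate=24000, return_tensors="np")
#     print(batch["input_values"].shape)  # expected (1, 1, 24000)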
| 13 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name)
            )
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name)
            )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)

        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2]
                )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')"
)
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed
            )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
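
# Hypothetical usage sketch (not part of the original file): exercise the
# pipeline without any downloads by requesting fake data. Left commented out
# because it needs a TensorFlow installation:
#
#     mnist = read_data_sets("/tmp/mnist", fake_data=True, one_hot=True)
#     xs, ys = mnist.train.next_batch(32, fake_data=True)
#     print(len(xs), len(ys))  # 32 32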
| 431 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 716 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 204 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
| 352 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
| 352 | 1 |
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
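
# Hypothetical sanity checks (not part of the original file): 6 and 28 are the
# first two perfect numbers, while 12 is not (1 + 2 + 3 + 4 + 6 = 16 != 12).
assert perfect(6) and perfect(28) and not perfect(12)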
| 633 |
import os
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
lowercase = [
[int(lowerCAmelCase__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase = len(lowerCAmelCase__ )
lowercase = len(matrix[0] )
lowercase = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
lowercase = matrix[i][0]
for j in range(1 , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
lowercase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase__ ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 1 |
"""simple docstring"""
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
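    # Hypothetical check (not part of the original file): the positive root of
    # 10 - x * x is sqrt(10) ~ 3.1623, and bisection stops once the bracket is
    # narrower than 0.01, so the result lands within 0.01 of the true root.
    assert abs(bisection(0, 6) - 10**0.5) < 0.01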
| 49 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
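
# Hypothetical note (not part of the original file): the fast test above runs
# on CPU with tiny randomly initialised components, roughly:
#
#     components = StableDiffusionInpaintPipelineFastTests().get_dummy_components()
#     pipe = StableDiffusionInpaintPipeline(**components)
#
# while the integration tests need a CUDA GPU and download the
# stabilityai/stable-diffusion-2-inpainting checkpoint.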
| 695 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 596 |
import os
lowerCAmelCase_ = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 1_00, 'D': 5_00, 'M': 10_00}
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Any = 0
lowercase : Any = 0
while index < len(__magic_name__ ) - 1:
lowercase : List[Any] = SYMBOLS[numerals[index]]
lowercase : Optional[Any] = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the shortest Roman-numeral representation of ``num``."""
    numerals = ''
    m_count = num // 1000
numerals += m_count * "M"
num %= 10_00
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting each numeral in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
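# Quick sanity check for the two converters above (illustrative only, not part of
# the Project Euler harness):
#   assert parse_roman_numerals('XIV') == 14
#   assert generate_roman_numerals(14) == 'XIV'
#   assert generate_roman_numerals(parse_roman_numerals('IIIIIIIII')) == 'IX'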
if __name__ == "__main__":
print(f'''{solution() = }''') | 596 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, per perimeter up to ``max_perimeter``, the right triangles with integer sides."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Project Euler 39: the perimeter <= n with the maximum number of solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
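# For reference, the classic example from the problem statement: perimeter 120 has
# exactly three solutions, {20, 48, 52}, {24, 45, 51} and {30, 40, 50}, so
# pythagorean_triple(120)[120] == 3.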
if __name__ == "__main__":
print(F'Perimeter {solution()} has maximum solutions') | 518 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wav2vec2'] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_wav2vec2'] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_wav2vec2'] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 17 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                "you'll need to provide the paths instead, "
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py '
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
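# A hypothetical end-to-end sketch (worker count and model id are placeholders):
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       'facebook/rag-token-nq', actor_handles=workers
#   )
#   retriever.init_retrieval()  # initializes the index once per Ray actor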
| 49 | import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer connects the input nodes
        # with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Gradient of the squared error w.r.t. the weights between the second
        # hidden layer and the output layer.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the output is to be predicted.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # Derivative expressed in terms of the sigmoid output s: s * (1 - s).
    return (value) * (1 - (value))
def example() -> int:
    # Input values (all 3-bit binary patterns).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
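# Note on the back-propagation above: because sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)),
# sigmoid_derivative() is deliberately applied to the stored *activations*
# (e.g. self.predicted_output) rather than to the pre-activation sums.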
| 49 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Union[str, Any] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of GIT (a CLIP-style ViT)."""

    model_type = 'git_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act='quick_gelu',
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GitConfig
        if config_dict.get('model_type') == 'git':
            config_dict = config_dict['vision_config']
        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for the full GIT (GenerativeImage2Text) model."""

    model_type = 'git'

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the GitVisionConfig with default values.')
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
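# Hypothetical construction example (default values follow the signatures above):
#
#   config = GitConfig()  # builds a default GitVisionConfig internally
#   assert config.vision_config.patch_size == 16
#   as_dict = config.to_dict()  # vision_config is expanded to a plain dict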
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('roberta-base', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 63 | 1 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    """Smallest index in v[left+1 .. right] whose value is >= key (binary search)."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh candidate subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling value to keep candidate tails minimal.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
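# Illustrative check: for [10, 22, 9, 33, 21, 50, 41, 60] the longest strictly
# increasing subsequence is 10, 22, 33, 50, 60, so the function returns 5.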
if __name__ == "__main__":
import doctest
doctest.testmod()
| 460 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 460 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : Dict = logging.get_logger(__name__)
# TODO Update this
__magic_name__ : Optional[int] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration for ESM models (including ESMFold folding models)."""

    model_type = 'esm'

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type='absolute',
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: int = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
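# Worked example of the checks above with the defaults: 1024 // 32 == 32 sequence
# heads and 128 // 32 == 4 pairwise heads, and 32 * 32 == 1024, 4 * 32 == 128, so a
# default TrunkConfig passes validation.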
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 497 |
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    """Find x in sorted arr by jumping sqrt(n)-sized blocks, then scanning linearly."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
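# With block size sqrt(n), jump search makes at most sqrt(n) jumps plus sqrt(n) linear
# steps, i.e. O(sqrt(n)) comparisons on a sorted list -- between linear and binary search.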
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
    if res == -1:
        print('Number not found!')
    else:
        print(f'Number {x} is at index {res}')
| 497 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
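    # The test above relies on `truncate_before_pattern`: decoding stops at the first
    # regex hit (here the trailing "#" comment marker and the blank-line run), which is
    # how CodeGen generations are trimmed to a single completion.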
def UpperCAmelCase__ ( self : List[Any] ):
pass
| 713 |
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 129 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)
        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")
        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )
    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
| 92 |
"""simple docstring"""
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n-by-n spiral (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
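# Worked check from the problem statement: a 5x5 spiral has diagonal entries
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, and indeed solution(5) == 101.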
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 273 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 350 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the TPU launcher arguments plus everything destined for the training script."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
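# Hypothetical invocation (script name and flags are placeholders):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher rewrites sys.argv so the training script sees --tpu_num_cores itself.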
| 350 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan over array[left:right]; used once the interval is small."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted list; returns an index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted list; returns an index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f'Iterative search: {target} found at positions: {result1}')
        print(f'Recursive search: {target} found at positions: {result2}')
    else:
        print('Not found')
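# Each ternary step keeps one third of the interval (discarding roughly two thirds),
# so the search is O(log n) overall -- log base 3 for the splitting phase, plus the
# constant-bounded linear scan once the interval drops below `precision`.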
| 305 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        'image_embeds',
        'negative_image_embeds',
    ]
    batch_params = ['image_embeds', 'negative_image_embeds']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
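# Condensed form of the inference recipe the slow test exercises (requires a
# CUDA GPU; checkpoints as above):
#
#     prior = KandinskyV22PriorPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#     ).to("cuda")
#     decoder = KandinskyV22Pipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
#     ).to("cuda")
#     image_embeds, negative_image_embeds = prior("red cat, 4k photo", negative_prompt="").to_tuple()
#     image = decoder(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds).images[0]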
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Perform the sum of the factorial of every digit in `number`."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Count the starting numbers below `number_limit` whose factorial-digit-sum
    chain contains exactly `chain_length` non-repeating elements."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
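    # Sanity checks: 145 is a factorial-digit-sum fixed point (1! + 4! + 5! = 145)
    # and 169 lies on the known loop 169 -> 363601 -> 1454 -> 169.
    assert digit_factorial_sum(145) == 145
    assert digit_factorial_sum(169) == 363601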
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"

_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"

_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        # Newer NLTK versions expect pre-tokenized input.
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
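# Rough wiring sketch (checkpoint name and worker count are assumptions, not
# part of this module):
#
#     import ray
#     ray.init()
#     workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#     retriever = RagRayDistributedRetriever.from_pretrained(
#         "facebook/rag-token-nq", actor_handles=workers
#     )
#     retriever.init_retrieval()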
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
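# Going forward, import directly from the new location instead, e.g.:
#
#     from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput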
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : List[Any] = logging.get_logger(__name__)
snake_case_ : List[Any] = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
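# Example: a deliberately tiny configuration for smoke tests (all values are
# arbitrary choices, not a released checkpoint):
#
#     config = RwkvConfig(vocab_size=1000, context_length=128, hidden_size=64, num_hidden_layers=2)
#     config.attention_hidden_size  # 64, defaults to hidden_size
#     config.intermediate_size      # 256, defaults to 4 * hidden_size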
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use the "old" tokenizer name here on purpose
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
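# The integration test above reduces to this generation recipe:
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
#     model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
#     inputs = tokenizer(["sample dialogue line"], return_tensors="tf")
#     ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
#     tokenizer.batch_decode(ids.numpy(), skip_special_tokens=True)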
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
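# The helpers under test follow a small, fixed contract; e.g. (import path
# assumed to match wherever `utils_summarization` lives):
#
#     truncate_or_pad([1, 2, 3, 4], 10, 0)     # -> [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
#     truncate_or_pad(list(range(13)), 10, 0)  # -> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]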
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
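# Example: the default values above describe a ResNet-50-style model; exposing
# stages as backbone features is a matter of passing `out_features`:
#
#     config = ResNetConfig(out_features=["stage2", "stage4"])
#     config.depths       # [3, 4, 6, 3]
#     config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']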
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
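    # Typical invocation (run id, token and output dir are placeholders):
    #
    #   python extract_warnings.py --workflow_run_id 12345 --output_dir ./artifacts \
    #       --token <github_token> --targets DeprecationWarning,UserWarning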
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
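    # Small demo of the two-ended recursive search defined above:
    print(search([1, 4, 7, 10, 15], 10))  # 3
    print(search([1, 4, 7, 10, 15], 5))   # -1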
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
    doctest.testmod()
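    # Quick demo on two classic word-break instances:
    print(word_break("applepenapple", ["apple", "pen"]))  # True
    print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False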
_lowerCAmelCase : int ="""
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCAmelCase : List[str] =[{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCAmelCase : int ={
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
} | 113 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder: a GPT-2 language model conditioned on a projected prefix embedding.
    """

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()
        self.prefix_length = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)
    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate captions (token ids and sequence lengths) for each feature."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Generate text from a prompt (token ids or embeddings) via beam search."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: seed the beams with the top-k tokens.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Subsequent steps: extend every beam and keep the best `beam_size`.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
require_version(deps[pkg] , __UpperCAmelCase )
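# Example: `dep_version_check("tokenizers")` compares the installed tokenizers
# version against the pin recorded in dependency_versions_table.py and raises
# if they are incompatible.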
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
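# Call pattern exercised by the tests above, for reference:
#
#     classifier = pipeline("text-classification")
#     classifier("This is great !")           # [{'label': 'POSITIVE', 'score': ...}]
#     classifier("This is great !", top_k=2)  # scores for the top two labels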
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
lowercase = _re_indent.search(lowercase_ )
return "" if search is None else search.groups()[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : int="" , lowercase_ : int=None , lowercase_ : List[Any]=None ):
lowercase = 0
lowercase = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(lowercase_ ):
index += 1
lowercase = ["""\n""".join(lines[:index] )]
else:
lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase = [lines[index]]
index += 1
while index < len(lowercase_ ) and (end_prompt is None or not lines[index].startswith(lowercase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(lowercase_ ) )
if index < len(lowercase_ ) - 1:
lowercase = [lines[index + 1]]
index += 1
else:
lowercase = []
else:
blocks.append("""\n""".join(lowercase_ ) )
lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase_ ) > 0:
blocks.append("""\n""".join(lowercase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase_ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] ):
def _inner(lowercase_ : str ):
return key(lowercase_ ).lower().replace("""_""" , """""" )
return _inner
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : List[str]=None ):
# If no key is provided, we use a noop.
def noop(lowercase_ : int ):
return x
if key is None:
lowercase = noop
# Constants are all uppercase, they go first.
lowercase = [obj for obj in objects if key(lowercase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase = [obj for obj in objects if key(lowercase_ )[0].isupper() and not key(lowercase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase = [obj for obj in objects if not key(lowercase_ )[0].isupper()]
lowercase = ignore_underscore(lowercase_ )
return sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
# This inner function sort imports between [ ].
def _replace(lowercase_ : Union[str, Any] ):
lowercase = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowercase_ )] ) + "]"
lowercase = import_statement.split("""\n""" )
if len(lowercase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase = 2 if lines[1].strip() == """[""" else 1
lowercase = [(i, _re_strip_line.search(lowercase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase = sort_objects(lowercase_ , key=lambda lowercase_ : x[1] )
lowercase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowercase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase = keys[:-1]
lowercase = get_indent(lines[1] ) + """, """.join([F"""\"{k}\"""" for k in sort_objects(lowercase_ )] )
return "\n".join(lowercase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase = _re_bracket_content.sub(_replace , lowercase_ )
return import_statement
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : List[str]=True ):
with open(lowercase_ , encoding="""utf-8""" ) as f:
lowercase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase = split_code_in_indented_blocks(
lowercase_ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowercase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase = main_blocks[block_idx]
lowercase = block.split("""\n""" )
# Get to the start of the imports.
lowercase = 0
while line_idx < len(lowercase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase = len(lowercase_ )
else:
line_idx += 1
if line_idx >= len(lowercase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase = """\n""".join(block_lines[line_idx:-1] )
lowercase = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowercase = split_code_in_indented_blocks(lowercase_ , indent_level=lowercase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase = [(pattern.search(lowercase_ ).groups()[0] if pattern.search(lowercase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase = [(i, key) for i, key in enumerate(lowercase_ ) if key is not None]
lowercase = [x[0] for x in sorted(lowercase_ , key=lambda lowercase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase = 0
lowercase = []
for i in range(len(lowercase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowercase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowercase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(lowercase_ ) )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any]=True ):
lowercase = []
for root, _, files in os.walk(lowercase_ ):
if "__init__.py" in files:
lowercase = sort_imports(os.path.join(lowercase_ , """__init__.py""" ) , check_only=lowercase_ )
if result:
lowercase = [os.path.join(lowercase_ , """__init__.py""" )]
if len(lowercase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowercase_ )} files, run `make style`.""" )
if __name__ == "__main__":
lowercase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase_ : Dict = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
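As a quick sanity check of the ordering rules above (an illustrative sketch: the import entry and the "TOKENIZER_CONFIG" constant name are made up), `sort_objects_in_import` can be exercised on a one-line `_import_structure` entry directly:

# assuming the functions above are in scope (e.g. pasted at the bottom of this module)
example = '    "tokenization_utils": ["PreTrainedTokenizer", "AddedToken", "TOKENIZER_CONFIG"],'
print(sort_objects_in_import(example))
# constants come first, then classes, then functions:
#     "tokenization_utils": ["TOKENIZER_CONFIG", "AddedToken", "PreTrainedTokenizer"],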
| 588 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
SCREAMING_SNAKE_CASE_ : Dict = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
SCREAMING_SNAKE_CASE_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
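The `_LazyModule` indirection defers the heavy sub-module imports until an attribute is first accessed. A rough standalone imitation of the same idea using module-level `__getattr__` (PEP 562) rather than the Transformers helper (illustrative only, written as a toy package `__init__.py`):

# toy_pkg/__init__.py (not the actual _LazyModule implementation)
import importlib

_import_structure = {"config": ["OnnxConfig"], "convert": ["export"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # resolve the attribute lazily on first access, then delegate to the real submodule
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")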
| 719 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Any = ['model.decoder.embed_positions.weights']
def _snake_case ( UpperCAmelCase_ : Optional[int] ):
if "emb" in name:
A__ = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
A__ = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
A__ = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
A__ = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
A__ = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
A__ = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
A__ = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
A__ = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
A__ = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
A__ = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
A__ = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _snake_case ( UpperCAmelCase_ : OrderedDict , UpperCAmelCase_ : int ):
A__ = list(state_dict.keys() )
A__ = {}
for key in keys:
A__ = state_dict.pop(UpperCAmelCase_ )
A__ = rename_keys(UpperCAmelCase_ )
if "in_proj_weight" in key:
# split fused qkv proj
A__ = val[:hidden_size, :]
A__ = val[hidden_size : 2 * hidden_size, :]
A__ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A__ = val
else:
A__ = val
return state_dict, enc_dec_proj_state_dict
def _snake_case ( UpperCAmelCase_ : str ):
if checkpoint == "small":
# default config values
A__ = 1024
A__ = 24
A__ = 16
elif checkpoint == "medium":
A__ = 1536
A__ = 48
A__ = 24
elif checkpoint == "large":
A__ = 2048
A__ = 48
A__ = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
A__ = MusicgenDecoderConfig(
hidden_size=UpperCAmelCase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCAmelCase_ , num_attention_heads=UpperCAmelCase_ , )
return config
@torch.no_grad()
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any="cpu" ):
A__ = MusicGen.get_pretrained(UpperCAmelCase_ , device=UpperCAmelCase_ )
A__ = decoder_config_from_checkpoint(UpperCAmelCase_ )
A__ = fairseq_model.lm.state_dict()
A__ , A__ = rename_state_dict(
UpperCAmelCase_ , hidden_size=decoder_config.hidden_size )
A__ = TaEncoderModel.from_pretrained("""t5-base""" )
A__ = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
A__ = MusicgenForCausalLM(UpperCAmelCase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A__ , A__ = decoder.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(UpperCAmelCase_ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
A__ = MusicgenForConditionalGeneration(text_encoder=UpperCAmelCase_ , audio_encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCAmelCase_ )
# check we can do a forward pass
A__ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A__ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A__ = model(input_ids=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
A__ = AutoTokenizer.from_pretrained("""t5-base""" )
A__ = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
A__ = MusicgenProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
# set the appropriate bos/pad token ids
A__ = 2048
A__ = 2048
# set other default generation config params
A__ = int(30 * audio_encoder.config.frame_rate )
A__ = True
A__ = 3.0
if pytorch_dump_folder is not None:
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(UpperCAmelCase_ )
processor.push_to_hub(UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
SCREAMING_SNAKE_CASE_ : Tuple = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
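The only non-trivial tensor surgery in the conversion is slicing each fused `in_proj_weight` into separate q/k/v projections. A tiny self-contained check of that slicing on a dummy tensor (the shape is made up, but the split mirrors `rename_state_dict` above):

import torch

hidden_size = 4
# a fused projection stacks q, k and v row-wise: shape (3 * hidden, hidden)
fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)

q = fused[:hidden_size, :]                   # rows 0 .. h-1
k = fused[hidden_size : 2 * hidden_size, :]  # rows h .. 2h-1
v = fused[-hidden_size:, :]                  # rows 2h .. 3h-1

assert torch.equal(torch.cat([q, k, v]), fused)  # the split is lossless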
| 500 | 0 |
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a `BertGeneration` model."""

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
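Usage follows the standard `PretrainedConfig` pattern: any overridden values replace the defaults, and unknown keyword arguments fall through `**kwargs` to the base class. A minimal sketch (assuming `transformers` is installed; the override values are arbitrary):

from transformers import BertGenerationConfig

config = BertGenerationConfig(hidden_size=512, num_hidden_layers=4)
print(config.model_type)   # "bert-generation"
print(config.hidden_size)  # 512 (overridden default)
print(config.use_cache)    # True (untouched default)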
| 11 |
def multiplicative_persistence(num: int) -> int:
    """
    Return how many times the digits of `num` must be multiplied together
    before a single digit remains.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return how many times the digits of `num` must be summed
    before a single digit remains.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
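A worked trace makes the difference between the two notions concrete (both values are verifiable by hand):

# multiplicative: 39 -> 3*9=27 -> 2*7=14 -> 1*4=4, so three steps
print(multiplicative_persistence(39))  # 3
# additive: 39 -> 3+9=12 -> 1+2=3, so two steps
print(additive_persistence(39))        # 2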
| 11 | 1 |
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh

    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
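A quick smoke test for the parser (an illustrative sketch, not part of the original script): write a fake pytest-style `warnings.txt` into a zip and run `extract_warnings_from_single_artifact` over it. Note the function reads the module-level `from_gh` flag, so the sketch pins it to False:

import os
import tempfile
import zipfile

from_gh = False  # the extractor consults this module-level flag
with tempfile.TemporaryDirectory() as tmp:
    artifact = os.path.join(tmp, "artifact.zip")
    with zipfile.ZipFile(artifact, "w") as z:
        # indented lines form a warning body; a non-indented line flushes the buffer
        z.writestr(
            "warnings.txt",
            "src/foo.py:10\n"
            "  /src/foo.py:10: DeprecationWarning: old API\n"
            "src/bar.py:2\n"
            "  /src/bar.py:2: UserWarning: careful\n"
            "-- end --\n",
        )
    print(extract_warnings_from_single_artifact(artifact, targets=["DeprecationWarning"]))
    # {'/src/foo.py:10: DeprecationWarning: old API'}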
| 709 |
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is given as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Traverse the graph breadth-first, recording each vertex's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to `target_vertex` as `v1->v2->...->vn`."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # raises ValueError: no path from G to Foo
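The recursive `shortest_path` above can hit Python's recursion limit on very deep breadth-first trees. An equivalent iterative reconstruction, walking the `parent` map back from the target, avoids that (a sketch reusing the `Graph` instance `g` from the demo above):

def shortest_path_iterative(g: Graph, target_vertex: str) -> str:
    # walk parent pointers from the target back to the source, then reverse
    path = [target_vertex]
    while path[-1] != g.source_vertex:
        parent = g.parent.get(path[-1])
        if parent is None:
            raise ValueError(f"No path from vertex: {g.source_vertex} to vertex: {target_vertex}")
        path.append(parent)
    return "->".join(reversed(path))

# e.g. shortest_path_iterative(g, "D") == g.shortest_path("D")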
| 209 | 0 |