| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 82 – 53.2k) | int64 (0 – 721) | string (length 91 – 41.9k) | int64 (0 – 699) | int64 (0 – 1) |
'''Validate credit card numbers: prefix, length and Luhn checksum checks.'''


def validate_initial_digits(credit_card_number: str) -> bool:
    # Valid cards start with 34, 35, 37, 4, 5 or 6.
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # Double the value of every second digit.
        digit = int(cc_number[i])
        digit *= 2
        # If doubling a digit results in a two-digit number, i.e. greater
        # than 9 (e.g. 6 x 2 = 12), then add the digits of the product
        # (e.g. 12: 1 + 2 = 3, 15: 1 + 5 = 6) to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits.
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f'{error_message} of its length.')
        return False
    if not validate_initial_digits(credit_card_number):
        print(f'{error_message} of its first two digits.')
        return False
    if not luhn_validation(credit_card_number):
        print(f'{error_message} it fails the Luhn check.')
        return False
    print(f'{credit_card_number} is a valid credit card number.')
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number('4111111111111111')
    validate_credit_card_number('32323')
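# --- Illustration (not part of the original file): the Luhn doubling step ---
# show_luhn_steps is a hypothetical helper added here to make the doubling
# pass in luhn_validation above visible; it mirrors, not replaces, that code.
def show_luhn_steps(cc_number: str) -> None:
    for i in range(len(cc_number) - 2, -1, -2):
        doubled = int(cc_number[i]) * 2
        reduced = doubled - 9 if doubled > 9 else doubled  # e.g. 12 -> 1 + 2 = 3
        print(f'position {i}: {cc_number[i]} -> {doubled} -> {reduced}')


show_luhn_steps('4111111111111111')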
'''Check whether an integer is a perfect cube.'''


def perfect_cube(n: int) -> bool:
    # Round the floating-point cube root so near-miss results such as
    # 64 ** (1 / 3) == 3.9999... do not produce false negatives.
    val = round(n ** (1 / 3))
    return val**3 == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))   # False
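# --- Illustration (not part of the original file): a float-free variant ---
# A sketch using integer binary search on the cube root, avoiding the
# floating-point rounding the version above depends on.
def perfect_cube_binary_search(n: int) -> bool:
    if n < 0:
        n = -n  # (-m) ** 3 == -(m ** 3), so the sign does not matter
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        if mid**3 == n:
            return True
        if mid**3 < n:
            left = mid + 1
        else:
            right = mid - 1
    return False


print(perfect_cube_binary_search(27))  # True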
'''Iterative depth-first search (DFS) on a graph stored as an adjacency dict.'''

from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop the last element instead of the first one
        # 2) add adjacent elements to the stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
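# --- Illustration (not part of the original file) ---
# Every vertex of G is reachable from 'A', so the explored set is the whole
# vertex set (a set is returned, so no visiting order is implied).
assert depth_first_search(G, 'A') == {'A', 'B', 'C', 'D', 'E', 'F', 'G'}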
"""Convert DialoGPT ``*_ft.pkl`` checkpoints to the standard transformers weight layout."""

import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ['small', 'medium', 'large']

OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    d = torch.load(checkpoint_path)
    # Rename the decoder weight key to the name expected by transformers.
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
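# --- Illustration (not part of the original file) ---
# What the conversion does to a state dict, on a made-up one-tensor example.
import torch

state_dict = {'lm_head.decoder.weight': torch.zeros(2, 2)}  # hypothetical checkpoint content
state_dict['lm_head.weight'] = state_dict.pop('lm_head.decoder.weight')
assert set(state_dict) == {'lm_head.weight'}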
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(patience_sort(unsorted))
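# --- Illustration (not part of the original file) ---
# For the made-up input [5, 1, 4, 2], the first pass builds the stacks
# [5, 1] and [4, 2]; reversing each yields sorted runs that merge() combines.
data = [5, 1, 4, 2]
print(patience_sort(data))  # [1, 2, 4, 5]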
import numpy as np


def power_iteration(
    input_matrix,
    vector,
    error_tol=1e-12,
    max_iterations=100,
):
    """Power iteration: approximate the dominant eigenvalue/eigenvector pair."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to the next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know the vector is normalized already).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation: get eigenvalues and eigenvectors using the
        # built-in eigh (eigh is used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # The last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # The last column is the eigenvector of the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check that our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take element-wise absolute values of each eigenvector,
        # as eigenvectors are only unique up to sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
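# --- Illustration (not part of the original file) ---
# For a made-up diagonal matrix the dominant eigenvalue is simply the
# largest diagonal entry, so power iteration should return ~3.
eigen_value, eigen_vector = power_iteration(
    np.array([[2.0, 0.0], [0.0, 3.0]]), np.array([1.0, 1.0])
)
print(round(float(eigen_value), 6))  # ~3.0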
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
        return encoded_outputs
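# --- Illustration (not part of the original file) ---
# Usage sketch; the class name BlipImageProcessor is an inference from the
# defaults above (384x384 size, CLIP mean/std), and the input image is random.
import numpy as np
from transformers import BlipImageProcessor

processor = BlipImageProcessor()
image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
batch = processor(images=image, return_tensors='np')
print(batch['pixel_values'].shape)  # (1, 3, 384, 384)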
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetrImageProcessor


class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
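# --- Illustration (not part of the original file) ---
# The aspect-ratio arithmetic behind get_expected_values, on made-up sizes:
# with shortest_edge=18 and a 30x400 (w x h) image, the short side is pinned
# to 18 and the long side scales proportionally.
shortest_edge = 18
w, h = 30, 400
expected_width = shortest_edge
expected_height = int(shortest_edge * h / w)
print(expected_height, expected_width)  # 240 18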
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def __lowercase ( self : int ):
lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
lowerCAmelCase = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowerCAmelCase = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
lowerCAmelCase = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
lowerCAmelCase = model(lowerCAmelCase , labels=lowerCAmelCase ).loss
lowerCAmelCase = -tf.math.reduce_mean(lowerCAmelCase ).numpy()
lowerCAmelCase = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
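# --- Illustration (not part of the original file) ---
# The same checkpoint used for generation; this downloads google/mt5-small on
# first use, and the max_new_tokens kwarg assumes a reasonably recent
# transformers version.
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
batch = tokenizer("Hello there", return_tensors="tf")
generated = model.generate(**batch, max_new_tokens=8)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))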
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = (DDIMParallelScheduler,)
_a = (('eta', 0.0), ('num_inference_steps', 50))
def __lowercase ( self : Optional[int] , **lowerCAmelCase : List[str] ):
lowerCAmelCase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**lowerCAmelCase )
return config
def __lowercase ( self : Any , **lowerCAmelCase : Tuple ):
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**lowerCAmelCase )
lowerCAmelCase = scheduler_class(**lowerCAmelCase )
lowerCAmelCase , lowerCAmelCase = 10, 0.0
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
for t in scheduler.timesteps:
lowerCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
return sample
def __lowercase ( self : Dict ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def __lowercase ( self : Dict ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase )
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(steps_offset=1 )
lowerCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def __lowercase ( self : int ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase )
def __lowercase ( self : Optional[int] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase )
def __lowercase ( self : Optional[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def __lowercase ( self : Union[str, Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase )
def __lowercase ( self : List[str] ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase )
def __lowercase ( self : Tuple ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase )
def __lowercase ( self : Union[str, Any] ):
self.check_over_configs(thresholding=lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def __lowercase ( self : List[str] ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase )
def __lowercase ( self : List[str] ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase )
def __lowercase ( self : Any ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def __lowercase ( self : Any ):
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**lowerCAmelCase )
lowerCAmelCase , lowerCAmelCase = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = self.dummy_sample_deter + 0.1
lowerCAmelCase = self.dummy_sample_deter - 0.1
lowerCAmelCase = samplea.shape[0]
lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCAmelCase = torch.arange(lowerCAmelCase )[0:3, None].repeat(1 , lowerCAmelCase )
lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCAmelCase = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowerCAmelCase )
lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.22_3967 ) < 1e-3
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.full_loop(prediction_type="""v_prediction""" )
lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def __lowercase ( self : Optional[int] ):
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01 )
lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def __lowercase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01 )
lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
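# --- Illustration (not part of the original file) ---
# The denoising loop that full_loop() above exercises, with a zero "model"
# standing in for a real noise predictor; the tensor shape is made up.
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # a real model would predict noise here
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])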
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 595 | """simple docstring"""
"""Testing suite for the PyTorch TimeSformer model."""

import copy
import inspect
import unittest

import numpy as np

from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    """Load a short sample video hosted on the Hugging Face Hub."""
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
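# --- Illustration (not part of the original file) ---
# A randomly initialized, deliberately tiny TimeSformer; the config values
# below are made up so the sketch runs quickly on CPU.
import torch
from transformers import TimesformerConfig, TimesformerForVideoClassification

config = TimesformerConfig(
    image_size=32, patch_size=8, num_frames=2, hidden_size=32,
    num_hidden_layers=2, num_attention_heads=4, intermediate_size=64, num_labels=5,
)
model = TimesformerForVideoClassification(config).eval()
clip = torch.randn(1, 2, 3, 32, 32)  # (batch, frames, channels, height, width)
with torch.no_grad():
    logits = model(pixel_values=clip).logits
print(logits.shape)  # torch.Size([1, 5])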
"""simple docstring"""
def snake_case_ ( A_ : int = 1_00_00_00 ):
'''simple docstring'''
_lowerCamelCase : str = set(range(3, A_, 2 ) )
primes.add(2 )
for p in range(3, A_, 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p, A_, A_ ) ) )
_lowerCamelCase : Tuple = [float(A_ ) for n in range(limit + 1 )]
for p in primes:
for n in range(A_, limit + 1, A_ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
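# --- Illustration (not part of the original file) ---
# For limit=8 the answer is phi(2)+...+phi(8) = 1+2+2+4+2+6+4 = 21.
print(solution(8))  # 21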
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
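# --- Illustration (not part of the original file) ---
# A stripped-down sketch of the lazy-module idea used above (this is NOT the
# actual transformers _LazyModule implementation): attribute access triggers
# the real submodule import on demand.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module('.' + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)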
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative int found in `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
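# --- Illustration (not part of the original file) ---
# Usage with made-up environment variable names.
import os

os.environ['DEMO_FLAG'] = 'true'
os.environ['DEMO_WORKERS'] = '4'
assert parse_flag_from_env('DEMO_FLAG') is True
assert get_int_from_env(['MISSING_VAR', 'DEMO_WORKERS'], default=1) == 4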
from sklearn.metrics import matthews_corrcoef

import datasets


_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54

    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1

    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
          and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
          and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
          Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }
            ),
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
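# --- Illustration (not part of the original file) ---
# Loading the metric script and computing on a tiny made-up sample
# (TP=1, TN=1, FP=0, FN=1 gives MCC = 1 / sqrt(4) = 0.5).
import datasets

matthews_metric = datasets.load_metric('matthews_correlation')
print(matthews_metric.compute(references=[1, 0, 1], predictions=[1, 0, 0]))
# {'matthews_correlation': 0.5}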
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
    'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
    'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
    'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
    'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
    'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
    'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
    'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}


class AlbertConfig(PretrainedConfig):
    model_type = 'albert'

    def __init__(
        self,
        vocab_size=30_000,
        embedding_size=128,
        hidden_size=4_096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16_384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
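# --- Illustration (not part of the original file) ---
# Building a config and inspecting the ONNX dynamic axes; "default" is the
# generic task name OnnxConfig accepts.
config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
onnx_config = AlbertOnnxConfig(config, task='default')
print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask', 'token_type_ids']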
"""simple docstring"""
def a__ ( ) -> list[list[int]]:
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
__A = generate_large_matrix()
__A = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def a__ ( __SCREAMING_SNAKE_CASE ) -> None:
assert all(row == sorted(__SCREAMING_SNAKE_CASE , reverse=__SCREAMING_SNAKE_CASE ) for row in grid )
assert all(list(__SCREAMING_SNAKE_CASE ) == sorted(__SCREAMING_SNAKE_CASE , reverse=__SCREAMING_SNAKE_CASE ) for col in zip(*__SCREAMING_SNAKE_CASE ) )
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: str = 0
__lowerCAmelCase: Tuple = len(__SCREAMING_SNAKE_CASE ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__lowerCAmelCase: Optional[int] = (left + right) // 2
__lowerCAmelCase: List[Any] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__lowerCAmelCase: List[str] = mid + 1
else:
__lowerCAmelCase: Dict = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: Optional[Any] = 0
__lowerCAmelCase: Optional[int] = len(grid[0] )
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase: Union[str, Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(__SCREAMING_SNAKE_CASE ) * len(grid[0] )) - total
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
return len([number for row in grid for number in row if number < 0] )
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: List[Any] = 0
for row in grid:
for i, number in enumerate(__SCREAMING_SNAKE_CASE ):
if number < 0:
total += len(__SCREAMING_SNAKE_CASE ) - i
break
return total
def a__ ( ) -> None:
from timeit import timeit
print("Running benchmarks" )
__lowerCAmelCase: Dict = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__lowerCAmelCase: Optional[int] = timeit(F"{func}(grid=grid)" , setup=__SCREAMING_SNAKE_CASE , number=5_0_0 )
print(F"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
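# --- Illustration (not part of the original file) ---
# All three counters agree on a small sample grid with 8 negatives.
sample_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert (
    count_negatives_binary_search(sample_grid)
    == count_negatives_brute_force(sample_grid)
    == count_negatives_brute_force_with_break(sample_grid)
    == 8
)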
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    'configuration_rag': ['RagConfig'],
    'retrieval_rag': ['RagRetriever'],
    'tokenization_rag': ['RagTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_rag'] = [
        'RagModel',
        'RagPreTrainedModel',
        'RagSequenceForGeneration',
        'RagTokenForGeneration',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_rag'] = [
        'TFRagModel',
        'TFRagPreTrainedModel',
        'TFRagSequenceForGeneration',
        'TFRagTokenForGeneration',
    ]


if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' ,[
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] ,)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' ,[
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] ,)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' ,[('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] ,)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
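# The test above works because ZipExtractor checks only the leading magic
# number; as an illustrative sketch (my addition, not part of the test
# suite), the core idea reduces to comparing the first four bytes against
# the ZIP local file header signature:
#
#     ZIP_MAGIC = b"PK\x03\x04"
#
#     def looks_like_zip(path) -> bool:
#         with open(path, "rb") as f:
#             return f.read(4) == ZIP_MAGIC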
| 124 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan):
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's expected output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert the scaling, mapping network outputs back to feature range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
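# Illustrative usage of the pipeline above (the checkpoint id is an
# assumption; any compatible spectrogram-diffusion checkpoint works):
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     output = pipe(processed_midi_tokens, num_inference_steps=100)
#     audio = output.audios[0]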
| 345 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = scope
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 35 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 716 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module_params(module):
    """Disable gradient tracking on every parameter of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 304 | 0
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    # Build the sample tree: 1 at the root, 2 and 3 as children, 4 and 5 under 2.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: visit root, then the left and right subtrees."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, then the root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, then the right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal using a FIFO queue of nodes to process."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values of one level, scanning children left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values of one level, scanning children right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traverse: alternate left-to-right and right-to-left per level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 523 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
| 523 | 1
"""simple docstring"""
def solution(limit=28123):
    """
    Project Euler problem 23: return the sum of all positive integers up to
    `limit` that cannot be written as the sum of two abundant numbers.
    """
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
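# Quick sanity check of the divisor-sum sieve above (illustrative addition,
# not part of the original solution): the proper divisors of 12 are
# 1, 2, 3, 4 and 6, summing to 16 > 12, so 12 is the smallest abundant
# number and 24 = 12 + 12 is the smallest sum of two abundant numbers.
#
#     def proper_divisor_sum(n: int) -> int:
#         total = 1
#         for d in range(2, int(n**0.5) + 1):
#             if n % d == 0:
#                 total += d + (n // d if d != n // d else 0)
#         return total
#
#     assert proper_divisor_sum(12) == 16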
| 78 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None):
        """Symmetrically pad the bottom and right of the image to the next multiple of `size`."""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
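# Quick numeric check of the pad arithmetic in `pad` above (illustrative
# addition): with size = 8, a side of 20 gets (20 // 8 + 1) * 8 - 20 = 4
# extra pixels, while a side of 16 gets a full extra block of 8, because
# the formula always rounds up to the *next* multiple of `size`.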
| 78 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
'Pregnancy': 168629,
'Christianity': 7675,
'Explain': 106423,
'Fitness': 63440,
'Saving': 63163,
'Ask': 27171,
'Ass': 95985,
'Joke': 163509,
'Questions': 45622,
'Thoughts': 49605,
'Retail': 52342,
'Feminism': 164338,
'Writing': 11992,
'Atheism': 192263,
'Netflix': 48616,
'Computing': 39639,
'Opinion': 43213,
'Alone': 44967,
'Funny': 58917,
'Gaming': 40358,
'Human': 4088,
'India': 1331,
'Joker': 77138,
'Diet': 36206,
'Legal': 11859,
'Norman': 4939,
'Tip': 72689,
'Weight': 52343,
'Movies': 46273,
'Running': 23425,
'Science': 2090,
'Horror': 37793,
'Confession': 60572,
'Finance': 12250,
'Politics': 16360,
'Scary': 191985,
'Support': 12654,
'Technologies': 32516,
'Teenage': 66160,
'Event': 32769,
'Learned': 67460,
'Notion': 182770,
'Wikipedia': 37583,
'Books': 6665,
'Extract': 76050,
'Confessions': 102701,
'Conspiracy': 75932,
'Links': 63674,
'Narcissus': 150425,
'Relationship': 54766,
'Relationships': 134796,
'Reviews': 41671,
'News': 4256,
'Translation': 26820,
'multilingual': 128406,
}
def get_pairs(word):
    """
    Return the set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
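# Example (illustrative): for word = ("l", "o", "w"), get_pairs(word)
# returns {("l", "o"), ("o", "w")} - the adjacent symbol pairs whose merge
# ranks the BPE loop below consults when picking the next merge.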
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 406 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
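# Example invocation (the script filename and paths are illustrative
# placeholders, not taken from the source):
#
#     python convert_yoso_checkpoint.py \
#         --pytorch_model_path yoso.ckpt \
#         --config_file yoso_config.json \
#         --pytorch_dump_path ./yoso-hf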
| 406 | 1 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
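# Illustrative usage of the two helpers above:
#
#     resistor_parallel([2, 4])  # 1 / (1/2 + 1/4) = 4/3 ohms
#     resistor_series([2, 4])    # 2 + 4 = 6 ohms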
| 395 |
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 395 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
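# Illustrative usage of the pipeline above (the checkpoint name is just a
# common public example, not taken from the source):
#
#     from transformers import pipeline
#
#     fill = pipeline("fill-mask", model="bert-base-uncased")
#     fill("Paris is the [MASK] of France.", top_k=2)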
| 79 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model


def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jax.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
lowercase : str = parser.parse_args()
main(args)
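# Example invocation (illustrative; the script filename is an assumption):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion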
| 495 | 0 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    # Lomuto-style partition using the left-most element as the pivot.
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
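# Quick sanity check (illustrative, not part of the original script):
#   data = [3, 1, 4, 1, 5]
#   quick_sort_random(data, 0, len(data))
#   assert data == [1, 1, 3, 4, 5]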
| 705 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
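# Example invocation (illustrative; the script name and paths are assumptions):
#   python run_clm_igf.py --data_dir ./data --model_name_or_path gpt2 --output_dir ./igf_output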
| 421 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
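# Illustrative usage (not part of the original module):
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tokenizer("Hello world")["input_ids"]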
| 13 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
"""simple docstring"""
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
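# Sketch of how `targets` interacts with `top_k` (illustrative; assumes a loaded
# model and tokenizer):
#   fill_mask = FillMaskPipeline(model=model, tokenizer=tokenizer)
#   fill_mask("The capital of France is [MASK].", targets=["paris", "lyon"], top_k=1)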
| 664 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
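# Illustrative usage (not part of the original module):
#   config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#   onnx_config = AlbertOnnxConfig(config)
#   list(onnx_config.inputs.keys())  # ["input_ids", "attention_mask", "token_type_ids"]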
| 719 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex) -> int:
        return self.node_position[vertex]

    def set_position(self, vertex, pos) -> None:
        self.node_position[vertex] = pos

    # Sift a value down the min-heap, keeping the position bookkeeping in sync.
    def top_to_bottom(self, heap, start, size, positions) -> None:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if the value of any node in the min-heap decreases.
    def bottom_to_top(self, val, index, heap, position) -> None:
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions) -> None:
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
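# Worked example (illustrative): triangle graph 0-1 (w=1), 1-2 (w=2), 0-2 (w=3).
#   graph = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
#   prisms_algorithm(graph)  # -> [(0, 1), (1, 2)], the minimum spanning tree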
| 373 | 0 |
'''simple docstring'''
def upper(word: str) -> str:
    """Convert every lowercase ASCII letter in ``word`` to uppercase."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
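# Illustrative check (not in the original file):
#   upper("wow us")  # -> "WOW US"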
| 51 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
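# Expected output for the sample data above (hand-checked):
#   The following activities are selected:
#   0,1,3,4,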
| 261 | 0 |
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, outputs):
        with torch.no_grad():
            return self.model.generate_speech(**outputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
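# Illustrative usage (not part of the original module; the tool downloads
# checkpoints on first use):
#   tool = TextToSpeechTool()
#   waveform = tool("Hello, how are you?")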
| 704 |
def solution(limit: int = 1_000_000) -> int:
    # Sieve the odd composites out of the candidate set to get the primes below `limit`.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the primes p dividing n.
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
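# Hand-checked small case (illustrative): the sum of phi(n) for 2 <= n <= 10 is 31,
# so solution(10) == 31.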
| 83 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self) -> KarrasVeSchedulerState:
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: jnp.ndarray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
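# Minimal usage sketch (illustrative):
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   # state.schedule now holds sigma(t_i) for each timestep.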
| 554 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
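# Run these tests (illustrative; the path is an assumption based on the usual repo layout):
#   python -m pytest tests/models/vit/test_modeling_tf_vit.py -k test_model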
| 302 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 162 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_second[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_second[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
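# Context sketch (an assumption for illustration, not taken from the test suite
# above): ImageGPT's processor color-quantizes pixels against its "clusters"
# palette, so an image becomes a sequence of cluster indices -- the `input_ids`
# checked above. A minimal version of that nearest-cluster lookup:
def nearest_cluster_ids(pixels, clusters):
    # pixels: (n, 3) array of normalized colors, clusters: (k, 3) palette
    distances = np.linalg.norm(pixels[:, None, :] - clusters[None, :, :], axis=-1)
    return distances.argmin(axis=1)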
| 162 | 1 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
'''simple docstring'''
    def _no_encoding_on_file_open(self, file_path: str):
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, file_path: str):
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
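# Quick illustration (standalone sketch, not one of the tests above): the first
# regex flags an open() call that neither passes an encoding nor opens in a
# binary/write mode, while an explicit utf-8 open is left alone.
_DEMO_RE = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert _DEMO_RE.search(' open("data.txt")') is not None
assert _DEMO_RE.search(' open("data.txt", encoding="utf-8")') is None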
| 186 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class a__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModel.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : int ) -> Optional[int]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
__A, __A= TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
__A, __A= AutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
__A, __A= TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
__A, __A= AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Tuple ) -> Dict:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
__A, __A= TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
__A, __A= AutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
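# Minimal cross-framework round trip (illustrative; the checkpoint name and local
# path are assumptions): the from_pt/from_tf flags exercised above convert weights
# between frameworks on load.
#
#     pt_model = BertForMaskedLM.from_pretrained("bert-base-uncased")
#     pt_model.save_pretrained("local-ckpt")
#     tf_model = TFBertForMaskedLM.from_pretrained("local-ckpt", from_pt=True)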
| 186 | 1 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
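# Shape of the output (illustrative assumption): analyze_results returns plain
# strings ready for printing, e.g.
#   ["Differences for torch backend:",
#    "  BertModel in _import_structure but not in TYPE_HINT."]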
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 717 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
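# Minimal usage sketch (feature and column names are illustrative assumptions):
#
#     from datasets import ClassLabel, Features, Value
#     features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#     task = TextClassification(text_column="text", label_column="labels")
#     task = task.align_with_features(features)  # label_schema now holds the real ClassLabel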
| 220 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
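# Hedged usage sketch (the checkpoint id is an assumption, not taken from this
# file; any LDM super-resolution checkpoint with vqvae/unet/scheduler should work):
#
#     import PIL.Image
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = PIL.Image.open("input.png").convert("RGB")
#     upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]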
| 268 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image using the global mean pixel value as threshold."""
    height, width = image.size
    mean = 0
    pixels = image.load()

    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
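# A vectorized alternative, shown for comparison only (assumes numpy as an extra
# dependency; note it uses the float mean rather than the integer mean above):
def mean_threshold_fast(image: Image) -> Image:
    import numpy as np

    arr = np.asarray(image, dtype=np.int64)
    # pixels strictly above the global mean become white, the rest black
    return Image.fromarray(np.where(arr > arr.mean(), 255, 0).astype("uint8"))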
if __name__ == "__main__":
UpperCamelCase__ = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
| 268 | 1 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
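# Quick sanity check (illustrative): both implementations agree on a 3-4-5 triangle.
assert euclidean_distance((0, 0), (3, 4)) == 5.0
assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0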
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
benchmark()
| 485 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
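# Example (illustrative): ["--num_proc", "8", "--cache_dir", "/tmp"] is parsed into
# {"num_proc": "8", "cache_dir": "/tmp"}; values are kept as strings.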
def main():
    parser = ArgumentParser("HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 485 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = RoCBertBasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = RoCBertBasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = RoCBertBasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = RoCBertBasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=snake_case_ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_lowerCAmelCase : List[str] = {}
for i, token in enumerate(snake_case_ ):
_lowerCAmelCase : List[Any] = i
_lowerCAmelCase : Dict = RoCBertWordpieceTokenizer(vocab=snake_case_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __UpperCamelCase ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __UpperCamelCase ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __UpperCamelCase ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(snake_case_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
_lowerCAmelCase : str = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(snake_case_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : Tuple = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
_lowerCAmelCase : Optional[int] = tokenizer_r.encode_plus(
snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , return_offsets_mapping=snake_case_ , add_special_tokens=snake_case_ , )
_lowerCAmelCase : Dict = tokenizer_r.do_lower_case if hasattr(snake_case_ , """do_lower_case""" ) else False
_lowerCAmelCase : int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = ["""的""", """人""", """有"""]
_lowerCAmelCase : Dict = """""".join(snake_case_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : int = tokenizer_p.encode(snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : Dict = tokenizer_r.encode(snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : Tuple = tokenizer_r.convert_ids_to_tokens(snake_case_ )
_lowerCAmelCase : List[str] = tokenizer_p.convert_ids_to_tokens(snake_case_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
_lowerCAmelCase : str = False
_lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : int = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : List[Any] = tokenizer_r.encode(snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : Optional[Any] = tokenizer_p.encode(snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : int = tokenizer_r.convert_ids_to_tokens(snake_case_ )
_lowerCAmelCase : Dict = tokenizer_p.convert_ids_to_tokens(snake_case_ )
# it is expected that only the first Chinese character is not preceded by "##".
_lowerCAmelCase : Optional[Any] = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(snake_case_ )
]
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_lowerCAmelCase : Optional[int] = tokenizer.encode("""你好""" , add_special_tokens=snake_case_ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode("""你是谁""" , add_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_lowerCAmelCase : int = """你好,你是谁"""
_lowerCAmelCase : str = tokenizer.tokenize(snake_case_ )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(snake_case_ )
_lowerCAmelCase : Any = tokenizer.convert_tokens_to_shape_ids(snake_case_ )
_lowerCAmelCase : int = tokenizer.convert_tokens_to_pronunciation_ids(snake_case_ )
_lowerCAmelCase : Optional[Any] = tokenizer.prepare_for_model(
snake_case_ , snake_case_ , snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : Dict = tokenizer.encode_plus(snake_case_ , add_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
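# Context sketch (assumption, not part of the tests above): RoCBert represents each
# token three ways -- vocab id, glyph ("shape") id and pinyin ("pronunciation") id --
# so visually or phonetically perturbed characters stay close in representation.
#
#     tokenizer = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
#     tokens = tokenizer.tokenize("你好")
#     ids = tokenizer.convert_tokens_to_ids(tokens)
#     shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
#     pron_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)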
| 384 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def __UpperCamelCase ( self ):
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Optional[int] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_lowerCAmelCase : List[str] = {0: """batch"""}
_lowerCAmelCase : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
_lowerCAmelCase : Optional[int] = {0: """batch""", 1: """decoder_sequence"""}
_lowerCAmelCase : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase : str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.num_layers
for i in range(snake_case_ ):
_lowerCAmelCase : str = {0: """batch""", 2: """past_sequence + sequence"""}
_lowerCAmelCase : int = {0: """batch""", 2: """past_sequence + sequence"""}
else:
_lowerCAmelCase : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def __UpperCamelCase ( self ):
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Optional[int] = super().outputs
else:
_lowerCAmelCase : int = super(snake_case_ , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase : str = self.num_layers
for i in range(snake_case_ ):
_lowerCAmelCase : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
_lowerCAmelCase : Any = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def __UpperCamelCase ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
_lowerCAmelCase : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Generate decoder inputs
_lowerCAmelCase : Union[str, Any] = seq_length if not self.use_past else 1
_lowerCAmelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase : List[str] = dict(**snake_case_ , **snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = common_inputs["""input_ids"""].shape
_lowerCAmelCase : Tuple = common_inputs["""decoder_input_ids"""].shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.num_attention_heads
_lowerCAmelCase : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : List[str] = decoder_seq_length + 3
_lowerCAmelCase : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase : Optional[Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(snake_case_ , snake_case_ )] , dim=1 )
_lowerCAmelCase : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.num_layers
_lowerCAmelCase : List[str] = min(snake_case_ , snake_case_ )
_lowerCAmelCase : Tuple = max(snake_case_ , snake_case_ ) - min_num_layers
_lowerCAmelCase : int = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(snake_case_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
) )
# TODO: test this.
_lowerCAmelCase : Optional[int] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(snake_case_ , snake_case_ ):
common_inputs["past_key_values"].append((torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) )
return common_inputs
def __UpperCamelCase ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
_lowerCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Any = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Union[str, Any] = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase : Any = self.num_layers
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.num_attention_heads
_lowerCAmelCase : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : Optional[Any] = common_inputs["""attention_mask"""].dtype
_lowerCAmelCase : List[Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_ )] , dim=1 )
_lowerCAmelCase : Any = [
(torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) for _ in range(snake_case_ )
]
return common_inputs
def __UpperCamelCase ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase : Optional[int] = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase : Tuple = tokenizer.num_special_tokens_to_add(snake_case_ )
_lowerCAmelCase : List[str] = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase : int = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase : Tuple = dict(tokenizer(snake_case_ , return_tensors=snake_case_ ) )
return common_inputs
def __UpperCamelCase ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
elif self.task == "causal-lm":
_lowerCAmelCase : Tuple = self._generate_dummy_inputs_for_causal_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
else:
_lowerCAmelCase : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
return common_inputs
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Union[str, Any] = super()._flatten_past_key_values_(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
_lowerCAmelCase : Optional[Any] = super(snake_case_ , self )._flatten_past_key_values_(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
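# Hedged export sketch (API names follow transformers' ONNX export utilities and
# should be treated as assumptions, not as something this file defines):
#
#     from pathlib import Path
#     from transformers import AutoTokenizer, BartModel
#     from transformers.onnx import export
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
#     model = BartModel.from_pretrained("facebook/bart-large")
#     onnx_config = BartOnnxConfig(model.config, task="default")
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))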
| 384 | 1 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 704 |
from typing import List
from .keymap import KEYMAP, get_character
def mark( key ):
    def decorator( func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += [key]
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
def mark_multiple( *keys ):
    def decorator( func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += keys
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ):
        '''simple docstring'''
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , '''key_handler''' ):
            setattr(new_cls , '''key_handler''' , {} )
        setattr(new_cls , '''handle_input''' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , '''handle_key''' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
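# A minimal usage sketch (hypothetical `Menu` class; names as restored above): methods
# tagged with the decorators are collected into `key_handler` by the metaclass, and
# `handle_input` dispatches the pressed key to the matching method.
#
#     class Menu(metaclass=KeyHandler):
#         @mark("q")
#         def quit(cls):
#             ...
#
#     Menu.handle_input()  # reads one key and calls `quit` when "q" is pressed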
| 416 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TimesformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_video_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TimesformerForVideoClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , expected_shape )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_video_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        """simple docstring"""
        if not self.has_attentions:
            pass
        else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict['''output_attentions'''] = True
                inputs_dict['''output_hidden_states'''] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                out_len = len(outputs )
                # Check attention is always last and order is fine
                inputs_dict['''output_attentions'''] = True
                inputs_dict['''output_hidden_states'''] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def prepare_video( ):
    '''simple docstring'''
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class TimesformerModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification( self ):
        """simple docstring"""
        model = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8] , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
lowerCAmelCase_ = {
'gpt2': 10_24,
'gpt2-medium': 10_24,
'gpt2-large': 10_24,
'gpt2-xl': 10_24,
'distilgpt2': 10_24,
}
class GPTaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop('''add_bos_token''' , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation : "Conversation" ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> str:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
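    # A usage sketch (the checkpoint name is illustrative, not taken from this file):
    #
    #     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    #     inputs = processor(text=["a photo of a cat"], images=image,
    #                        return_tensors="pt", padding=True)
    #
    # Text-only and image-only calls are also supported, as handled by the branches above.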
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
| 713 |
'''simple docstring'''
from math import factorial
def solution( n = 20 ) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
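# The middle entry of row 2n of Pascal's triangle is the central binomial coefficient
# C(2n, n) = (2n)! / (n!)^2, which for n = 20 counts the 20x20 lattice paths of
# Project Euler problem 15: C(40, 20) = 137846528820.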
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images=None , text=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_token_type_ids=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ):
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 594 | 0 |
from __future__ import annotations
import math
def prime_sieve( num ):
    """simple docstring"""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
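# Example (a quick sanity check): prime_sieve(25) returns
# [2, 3, 5, 7, 11, 13, 17, 19, 23]. The sieve runs in O(n log log n) time
# and O(n) extra space.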
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 705 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs( graph ):
    """simple docstring"""
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
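# The check runs in O(V + E): every vertex is colored once by the DFS and every edge is
# inspected once in the verification pass. A BFS two-coloring works just as well and can
# report a conflict as soon as it sees an edge whose endpoints share a color.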
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 253 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'latents',
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        transformer = TransformeraDModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_000 , norm_type='ada_norm_zero' , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference( self ):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3) )
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1E-3 )
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256( self ):
        """simple docstring"""
        generator = torch.manual_seed(0 )
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
        pipe.to('cuda' )
        words = ['vase', 'umbrella', 'white shark', 'white wolf']
        ids = pipe.get_label_ids(words )
        images = pipe(ids , generator=generator , num_inference_steps=40 , output_type='np' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
            assert np.abs((expected_image - image).max() ) < 1E-2
    def test_dit_512( self ):
        """simple docstring"""
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('cuda' )
        words = ['vase', 'umbrella']
        ids = pipe.get_label_ids(words )
        generator = torch.manual_seed(0 )
        images = pipe(ids , generator=generator , num_inference_steps=25 , output_type='np' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                f"""/dit/{word}_512.npy""" )
            assert np.abs((expected_image - image).max() ) < 1E-1
| 235 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowercase_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset( Dataset ):
    def __init__( self , length = 101 ):
        """simple docstring"""
        self.length = length
    def __len__( self ):
        """simple docstring"""
        return self.length
    def __getitem__( self , i ):
        """simple docstring"""
        return i
class DummyDataCollator:
    def __call__( self , features ):
        """simple docstring"""
        return {"input_ids": torch.tensor(features ), "labels": torch.tensor(features )}
class DummyModel( nn.Module ):
    def __init__( self ):
        """simple docstring"""
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120 , 80 )
    def forward( self , input_ids , labels=None ):
        """simple docstring"""
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device ), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore( TestCasePlus ):
    @require_torch_neuroncore
    def test_trainer( self ):
        """simple docstring"""
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ['torchrun'] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed( TestCasePlus ):
    @require_torch_multi_gpu
    def test_trainer( self ):
        """simple docstring"""
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ['torchrun'] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction ) -> Dict:
            sequential = list(range(len(dataset ) ) )
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    'Predictions and/or labels do not match expected results:\n  - predictions: '
                    F"""{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}""" )
            return {"success": success}
        trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
| 235 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case = 16
_snake_case = 32
def get_dataloaders( accelerator , batch_size = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function( examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn( examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    '''simple docstring'''
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather((predictions, batch["""labels"""]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
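# Design note: this TYPE_CHECKING / _LazyModule split is the standard transformers pattern --
# static type checkers see the real imports, while at runtime the heavy submodules (and their
# torch dependency) are only imported when an attribute is first accessed on the module.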
| 54 | 0 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowercase : Optional[Any] = None
try:
import msvcrt
except ImportError:
lowercase : Union[str, Any] = None
try:
import fcntl
except ImportError:
lowercase : str = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
lowercase : Optional[int] = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
lowercase : int = '3.0.12'
lowercase : Optional[int] = None
def __a ( ) -> Any:
global _logger
    _logger = _logger or logging.getLogger(__name__ )
return _logger
class Timeout( TimeoutError ):
    """simple docstring"""
    def __init__( self , lock_file ):
        """simple docstring"""
        self.lock_file = lock_file
        return None
    def __str__( self ):
        """simple docstring"""
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""
    def __init__( self , lock ):
        """simple docstring"""
        self.lock = lock
        return None
    def __enter__( self ):
        """simple docstring"""
        return self.lock
    def __exit__( self , exc_type , exc_value , traceback ):
        """simple docstring"""
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        """simple docstring"""
        max_filename_length = max_filename_length if max_filename_length is not None else 2_5_5
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self ):
        """simple docstring"""
        return self._lock_file
    @property
    def timeout( self ):
        """simple docstring"""
        return self._timeout
    @timeout.setter
    def timeout( self , value ):
        """simple docstring"""
        self._timeout = float(value )
        return None
    def _acquire( self ):
        """simple docstring"""
        raise NotImplementedError()
    def _release( self ):
        """simple docstring"""
        raise NotImplementedError()
    @property
    def is_locked( self ):
        """simple docstring"""
        return self._lock_file_fd is not None
    def acquire( self , timeout=None , poll_intervall=0.0_5 ):
        """simple docstring"""
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
                    time.sleep(poll_intervall )
        except: # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
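    # Design note: returning a proxy object (rather than `self`) lets callers write both
    # `lock.acquire()` / `lock.release()` and `with lock.acquire(timeout=...): ...` --
    # exiting the `with` block releases exactly one level of the nested lock counter.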
    def release( self , force=False ):
        """simple docstring"""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}" )
        return None
    def __enter__( self ):
        """simple docstring"""
        self.acquire()
        return self
    def __exit__( self , exc_type , exc_value , traceback ):
        """simple docstring"""
        self.release()
        return None
    def __del__( self ):
        """simple docstring"""
        self.release(force=True )
        return None
    def hash_filename_if_too_long( self , path , max_length ):
        """simple docstring"""
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock( BaseFileLock ):
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        """simple docstring"""
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
    def _acquire( self ):
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release( self ):
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(UpperCamelCase_):  # inherits from the base lock class defined above
    """Uses the `fcntl.flock` syscall to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(UpperCamelCase_):  # inherits from the base lock class defined above
    """Simply watches the existence of the lock file; works on any platform."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Pick the strongest lock mechanism available on this platform.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
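# --- Added usage sketch (not part of the original module) --------------------
# Shows the intended reentrant, context-managed use of the platform-selected
# `FileLock` above; the lock path is illustrative (Unix-style).
if __name__ == "__main__":
    demo_lock = FileLock("/tmp/demo.txt.lock", timeout=5)
    with demo_lock:  # __enter__ -> acquire(), __exit__ -> release()
        with demo_lock:  # reentrant: the same process may nest acquisitions
            pass
    assert not demo_lock.is_locked  # fully released once the counter hits zero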
| 649 |
'''Sum of the multiples of 3 or 5 below n.'''


def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below `n` that are divisible by 3 or 5."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f"{solution() = }")
| 649 | 1 |
'''Lazy import structure for the MobileBERT model.'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
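# Added illustration (not part of the original file): with the `_LazyModule`
# registration above, submodules are imported only on first attribute access,
# e.g.:
#
#     from transformers.models.mobilebert import MobileBertConfig  # triggers the lazy load
#     config = MobileBertConfig()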
 | 715 | def count_divisors(n: int) -> int:
    '''Count the divisors of n from its prime factorization: d(n) = prod(e_i + 1).'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution() -> int:
    '''Return the first triangle number having more than 500 divisors (Project Euler 12).'''
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num
if __name__ == "__main__":
print(solution())
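# Added check (my illustration): the multiplicative divisor-count identity used
# above, d(p1^e1 * ... * pk^ek) = (e1 + 1) * ... * (ek + 1), on small inputs.
# 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.
assert count_divisors(28) == 6
assert count_divisors(76576500) == 576  # the known Euler-12 answer has 576 > 500 divisors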
| 390 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Metric wrapper around `scipy.stats.pearsonr`."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 480 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    """`AbstractDatasetReader` for plain-text files, backed by the packaged `Text` builder."""

    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc
            )
            dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
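# Added usage sketch (the file path is hypothetical, not from the original source):
if __name__ == "__main__":
    ds = TextDatasetReader("corpus.txt", keep_in_memory=True).read()
    print(ds)  # a dataset with one "text" column, one example per line of corpus.txt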
| 480 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output) | 31 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
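# Added usage sketch (paths are hypothetical): end users normally reach this
# builder through `load_dataset`, which routes the "csv" builder name to the
# classes above and forwards extra kwargs into `CsvConfig`.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",")
    print(ds["train"])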
| 31 | 1 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
@property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpaint(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np"
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inpaint_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np"
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
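# Added usage sketch (not part of the test file): the same pipeline can run on
# CPU by swapping the ONNX Runtime execution provider; weights download on the
# first call, and the checkpoint/revision mirror the tests above.
if __name__ == "__main__":
    pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting", revision="onnx", provider="CPUExecutionProvider"
    )
    print(pipe)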
| 12 |
'''PyTorch MobileNetV1 model.'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1_024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF variable names to the corresponding PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Apply TensorFlow-style "SAME" padding to a convolution layer's input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(self, config, in_channels, out_channels, kernel_size, stride=1, groups=1, bias=False, use_normalization=True, use_activation=True) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros"
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels
                )
            )
            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1)
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT
    )
    def forward(self, pixel_values=None, output_hidden_states=None, labels=None, return_dict=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states
        ) | 585 | 0
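# Added worked example (my illustration): TF "SAME" padding computed by
# `apply_tf_padding` for a stride-2, 3x3 depthwise convolution on a 7x7 input
# pads by (1, 1, 1, 1), so the convolution output is 4x4 = ceil(7 / 2).
if __name__ == "__main__":
    conv = nn.Conv2d(8, 8, kernel_size=3, stride=2, groups=8, bias=False)
    x = torch.randn(1, 8, 7, 7)
    y = conv(apply_tf_padding(x, conv))
    print(y.shape)  # torch.Size([1, 8, 4, 4])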
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree) -> List[Tuple[int, ...]]:
    """Collect the shapes of all tensor leaves in a pytree of dicts, lists, tuples and tensors."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index for the given dims."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None) -> List[Tuple[slice, ...]]:
    """
    Produce an ordered sequence of tensor slices that, taken together, cover the contiguous flattened block of memory
    spanning `start` to `end` (both inclusive).
    """

    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree.
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]]
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :]
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """
    Equivalent to `t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]`, but without the initial reshape,
    which can be memory-intensive for non-contiguous tensors.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer ( layer , inputs , chunk_size , no_batch_dims , low_mem = False , _out = None , _add_into_out = False , ) ->Any:
    """simple docstring"""
    if not (len(inputs ) > 0):
        raise ValueError("Must provide at least one input" )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )
    def _prep_inputs(t ) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    prepped_inputs = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks ):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks = tensor_tree_map(select_chunk , prepped_inputs )
        # Run the layer on the chunk
        output_chunk = layer(**chunks )
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict ):
            def assign(d1 , d2 ) -> None:
                for k, v in d1.items():
                    if isinstance(v , dict ):
                        assign(v , d2[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for xa, xb in zip(out , output_chunk ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported" )
        i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
    return out
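# Added illustration (not from the original module): the flatten -> chunk -> scatter
# pattern above, reduced to a single tensor. `simple_chunked_apply` is a hypothetical
# helper name; it assumes `fn` preserves the leading batch dimension.
def simple_chunked_apply(fn , x , chunk_size ):
    out = None
    for i in range(0 , x.shape[0] , chunk_size ):
        chunk_out = fn(x[i : i + chunk_size] )
        if out is None:
            # allocate the full output lazily, from the first chunk's trailing shape
            out = chunk_out.new_zeros((x.shape[0],) + chunk_out.shape[1:] )
        out[i : i + chunk_size] = chunk_out
    return out
# e.g. simple_chunked_apply(torch.nn.functional.relu , torch.randn(10 , 4 ) , 3 )
# gives the same result as running fn on the full batch, with bounded peak memory.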
class snake_case :
    def __init__( self , max_chunk_size = 5_12 , ) ->None:
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None
    def _determine_favorable_chunk_size( self , fn , args , min_chunk_size ) ->int:
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches( self , ac_a , ac_b ) ->bool:
        consistent = True
        for a_a, a_b in zip(ac_a , ac_b):
            assert type(a_a) == type(a_b)
            if isinstance(a_a , (list, tuple)):
                consistent &= self._compare_arg_caches(a_a , a_b)
            elif isinstance(a_a , dict):
                a_a_items = [v for _, v in sorted(a_a.items() , key=lambda kv: kv[0])]
                a_b_items = [v for _, v in sorted(a_b.items() , key=lambda kv: kv[0])]
                consistent &= self._compare_arg_caches(a_a_items , a_b_items)
            else:
                consistent &= a_a == a_b
        return consistent
    def tune_chunk_size( self , representative_fn , args , min_chunk_size , ) ->int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , args , object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 712 |
"""simple docstring"""
def UpperCamelCase ( input_str ) ->bool:
    """simple docstring"""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
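# Added cross-check (illustrative, not part of the original file): a set-based
# equivalent of the bitmap test above; `has_unique_chars` is a name we introduce.
def has_unique_chars(input_str ) ->bool:
    return len(set(input_str ) ) == len(input_str )
# assert has_unique_chars("abcde") and not has_unique_chars("aa")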
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 210 | 0 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
_lowercase : Tuple = IFImgaImgSuperResolutionPipeline
_lowercase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
_lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""})
_lowercase : int = PipelineTesterMixin.required_optional_params - {"""latents"""}
def _lowercase ( self ) -> Dict:
'''simple docstring'''
return self._get_superresolution_dummy_components()
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("mps" ):
a__ : List[Any] =torch.manual_seed(lowerCAmelCase__ )
else:
a__ : str =torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
a__ : Tuple =floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
a__ : List[str] =floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
a__ : Any ={
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowercase ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _lowercase ( self ) -> str:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
self._test_save_load_local()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
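# Hedged aside (added): the device branch in get_dummy_inputs above exists because
# MPS does not accept a device-bound torch.Generator, so tests fall back to the
# global seed there. `make_generator` is an illustrative helper, not diffusers API.
def make_generator(device , seed=0 ):
    if str(device ).startswith("mps" ):
        return torch.manual_seed(seed )  # global generator; per-device ones unsupported on MPS
    return torch.Generator(device=device ).manual_seed(seed )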
| 563 |
from __future__ import annotations
from math import pow, sqrt
def _A ( resistance : float , reactance : float , impedance : float ):
    """simple docstring"""
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
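# Added worked example: a 3-4-5 impedance triangle (Z**2 = R**2 + X**2), so
# resistance=3 and reactance=4 solve to impedance 5; exact for these integers.
assert _A(resistance=3 , reactance=4 , impedance=0 ) == {"impedance": 5.0}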
| 563 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = parent
UpperCamelCase : Dict = out_indices if out_indices is not None else [4]
UpperCamelCase : List[str] = stage_names
UpperCamelCase : Any = out_features
UpperCamelCase : List[Any] = backbone
UpperCamelCase : Tuple = batch_size
UpperCamelCase : int = image_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : Optional[int] = use_pretrained_backbone
UpperCamelCase : Optional[int] = is_training
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : List[Any] = self.get_config()
return config, pixel_values
def _lowercase ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = TimmBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : int = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase : List[str] = config_and_inputs
UpperCamelCase : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class UpperCAmelCase_ ( _a, _a, _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : str = (TimmBackbone,) if is_torch_available() else ()
__UpperCamelCase : List[str] = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
__UpperCamelCase : str = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Any = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = TimmBackboneModelTester(self )
UpperCamelCase : Tuple = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = '''resnet18'''
UpperCamelCase : List[Any] = '''microsoft/resnet-18'''
UpperCamelCase : Optional[Any] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , use_timm_backbone=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
UpperCamelCase : Optional[Any] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , use_timm_backbone=__SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] )
UpperCamelCase : str = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Dict = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Tuple = [*signature.parameters.keys()]
UpperCamelCase : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Dict = True
UpperCamelCase : Optional[int] = self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCamelCase : Optional[Any] = self.all_model_classes[0]
UpperCamelCase : Tuple = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : int = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = outputs[0][-1]
# Encoder-/Decoder-only models
UpperCamelCase : List[Any] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCamelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Dict = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[Any] = model(**__SCREAMING_SNAKE_CASE )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCamelCase : Optional[int] = copy.deepcopy(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = None
UpperCamelCase : Dict = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCamelCase : int = copy.deepcopy(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = False
UpperCamelCase : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : int = model(**__SCREAMING_SNAKE_CASE )
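# Hedged usage sketch (added) of the API exercised above; kept as comments since it
# downloads a checkpoint. Names mirror the test; everything else is an assumption.
# backbone = AutoBackbone.from_pretrained("microsoft/resnet-18" , use_timm_backbone=True , out_indices=[1, 2, 3] )
# feats = backbone(torch.randn(1 , 3 , 224 , 224 ) ).feature_maps
# print([f.shape for f in feats] , backbone.channels )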
| 643 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : int = "▁"
__UpperCAmelCase : Tuple = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
__UpperCAmelCase : Dict = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
__UpperCAmelCase : Dict = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
__UpperCAmelCase : str = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
__UpperCAmelCase : Optional[int] = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = ["input_ids"]
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = RESOURCE_FILES_NAMES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="utf8" , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , vocab_file=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = do_lower_case
UpperCamelCase : Dict = sentencepiece_model_ckpt
UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCamelCase : Optional[Any] = self.load_vocab(filepath=__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : int = {self.sp_model.id_to_piece(__SCREAMING_SNAKE_CASE ): id for id in range(self.sp_model.get_piece_size() )}
UpperCamelCase : str = {v: k for k, v in self.vocab.items()}
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if text is None:
return None
UpperCamelCase : str = self.tokenize(__SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : str = '''''', []
for i, ch in enumerate(__SCREAMING_SNAKE_CASE ):
if ch in self.SP_CHAR_MAPPING:
UpperCamelCase : Optional[int] = self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Optional[Any] = unicodedata.normalize('''NFKC''' , __SCREAMING_SNAKE_CASE )
if self.is_whitespace(__SCREAMING_SNAKE_CASE ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = normalized_text, [], 0
if self.do_lower_case:
UpperCamelCase : Tuple = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCamelCase : Any = token[1:]
UpperCamelCase : Optional[int] = text[offset:].index(__SCREAMING_SNAKE_CASE ) + offset
UpperCamelCase : List[Any] = start + len(__SCREAMING_SNAKE_CASE )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCamelCase : str = end
return token_mapping
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.vocab )
def _lowercase ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.__dict__.copy()
UpperCamelCase : str = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for c in text) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCamelCase : List[str] = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCamelCase : Any = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCamelCase : Tuple = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCamelCase : int = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Optional[Any] = self.sp_model.SampleEncodeAsPieces(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = []
for pi, piece in enumerate(__SCREAMING_SNAKE_CASE ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__SCREAMING_SNAKE_CASE ) and pi != 0:
new_pieces.append(__SCREAMING_SNAKE_CASE )
continue
else:
continue
UpperCamelCase : Any = 0
for i, chunk in enumerate(__SCREAMING_SNAKE_CASE ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__SCREAMING_SNAKE_CASE ) or self.is_punct(__SCREAMING_SNAKE_CASE ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Union[str, Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Any = i
if len(__SCREAMING_SNAKE_CASE ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.vocab.get(__SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase : Any = [self.cls_token_id]
UpperCamelCase : str = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__SCREAMING_SNAKE_CASE ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__SCREAMING_SNAKE_CASE ) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE ) + 3)
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__SCREAMING_SNAKE_CASE ) == 1:
UpperCamelCase : Optional[int] = unicodedata.category(__SCREAMING_SNAKE_CASE )
if cat == "Zs":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = {}
with io.open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = line.rstrip('''\n''' )
UpperCamelCase : List[Any] = int(__SCREAMING_SNAKE_CASE )
return token_to_idx
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = 0
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCamelCase : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCamelCase : List[Any] = token_index
writer.write(token + '''\n''' )
index += 1
UpperCamelCase : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , '''sentencepiece.bpe.model''' )
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (vocab_file,)
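# Hedged aside (added): the tokenizer above wraps a plain SentencePiece model. A
# minimal round-trip with the sentencepiece library looks like this; the model path
# is an assumption (any trained .bpe.model works).
# sp = spm.SentencePieceProcessor()
# sp.Load("sentencepiece.bpe.model")
# pieces = sp.EncodeAsPieces("Hello world")  # e.g. ['▁Hello', '▁world']
# text = sp.DecodePieces(pieces)             # back to "Hello world"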
| 643 | 1 |
import cmath
import math
def apparent_power ( voltage : float , current : float , voltage_angle : float , current_angle : float ):
    '''simple docstring'''
    voltage_angle_rad = math.radians(voltage_angle )
    current_angle_rad = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage , voltage_angle_rad )
    current_rect = cmath.rect(current , current_angle_rad )
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
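# Added worked example: 100 V at 0 degrees and 5 A at 0 degrees give S = 500 + 0j VA,
# exactly, since cmath.rect(r, 0) == r + 0j.
assert apparent_power(100 , 5 , 0 , 0 ) == 500 + 0j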
| 348 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def UpperCamelCase__ ( _A: Tuple ):
'''simple docstring'''
__lowerCamelCase = VideoMAEConfig()
set_architecture_configs(_A , _A )
if "finetuned" not in model_name:
__lowerCamelCase = False
if "finetuned" in model_name:
__lowerCamelCase = """huggingface/label-files"""
if "kinetics" in model_name:
__lowerCamelCase = 400
__lowerCamelCase = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__lowerCamelCase = 174
__lowerCamelCase = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__lowerCamelCase = json.load(open(hf_hub_download(_A , _A , repo_type="""dataset""" ) , """r""" ) )
__lowerCamelCase = {int(_A ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase__ ( _A: Optional[int] , _A: List[str] ):
'''simple docstring'''
if "small" in model_name:
__lowerCamelCase = 384
__lowerCamelCase = 1536
__lowerCamelCase = 12
__lowerCamelCase = 16
__lowerCamelCase = 12
__lowerCamelCase = 3
__lowerCamelCase = 192
__lowerCamelCase = 768
elif "large" in model_name:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = 12
__lowerCamelCase = 8
__lowerCamelCase = 512
__lowerCamelCase = 2048
elif "huge" in model_name:
__lowerCamelCase = 1280
__lowerCamelCase = 5120
__lowerCamelCase = 32
__lowerCamelCase = 16
__lowerCamelCase = 12
__lowerCamelCase = 8
__lowerCamelCase = 640
__lowerCamelCase = 2560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def UpperCamelCase__ ( _A: Tuple ):
'''simple docstring'''
if "encoder." in name:
__lowerCamelCase = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__lowerCamelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__lowerCamelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__lowerCamelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__lowerCamelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__lowerCamelCase = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__lowerCamelCase = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__lowerCamelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__lowerCamelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__lowerCamelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__lowerCamelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__lowerCamelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__lowerCamelCase = name.replace("""head""" , """classifier""" )
return name
def UpperCamelCase__ ( _A: List[str] , _A: Any ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(_A )
if key.startswith("""encoder.""" ):
__lowerCamelCase = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__lowerCamelCase = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
__lowerCamelCase = config.decoder_hidden_size
__lowerCamelCase = int(key_split[2] )
__lowerCamelCase = """decoder.decoder_layers."""
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = config.hidden_size
__lowerCamelCase = int(key_split[1] )
__lowerCamelCase = """videomae.encoder.layer."""
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = val
return orig_state_dict
def UpperCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowerCamelCase = np.load(_A )
return list(_A )
def UpperCamelCase__ ( _A: Dict , _A: Optional[Any] , _A: Tuple , _A: int ):
'''simple docstring'''
__lowerCamelCase = get_videomae_config(_A )
if "finetuned" in model_name:
__lowerCamelCase = VideoMAEForVideoClassification(_A )
else:
__lowerCamelCase = VideoMAEForPreTraining(_A )
# download original checkpoint, hosted on Google Drive
__lowerCamelCase = """pytorch_model.bin"""
gdown.cached_download(_A , _A , quiet=_A )
__lowerCamelCase = torch.load(_A , map_location="""cpu""" )
if "model" in files:
__lowerCamelCase = files["""model"""]
else:
__lowerCamelCase = files["""module"""]
__lowerCamelCase = convert_state_dict(_A , _A )
model.load_state_dict(_A )
model.eval()
# verify model on basic input
__lowerCamelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__lowerCamelCase = prepare_video()
__lowerCamelCase = image_processor(_A , return_tensors="""pt""" )
if "finetuned" not in model_name:
__lowerCamelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__lowerCamelCase = torch.load(_A )
__lowerCamelCase = model(**_A )
__lowerCamelCase = outputs.logits
__lowerCamelCase = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCamelCase = torch.Size([1, 174] )
__lowerCamelCase = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCamelCase = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCamelCase = torch.Size([1, 174] )
__lowerCamelCase = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCamelCase = torch.Size([1, 174] )
__lowerCamelCase = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , _A , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , _A , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowerCamelCase = outputs.loss
assert torch.allclose(_A , _A , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_A )
model.save_pretrained(_A )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(_A , organization="""nielsr""" )
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_a : List[str] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
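# Hedged standalone sketch (added) of the qkv split performed in convert_state_dict:
# rows of the fused (3*dim, dim) matrix are taken block-wise in (q, k, v) order,
# which is a property of the source checkpoint, assumed here.
def split_fused_qkv(val , dim ):
    q_w = val[:dim, :]
    k_w = val[dim : dim * 2, :]
    v_w = val[-dim:, :]
    return q_w, k_w, v_w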
| 479 | 0 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid ( _outputs ):
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax ( _outputs ):
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "sigmoid"
UpperCAmelCase__ : List[Any] = "softmax"
UpperCAmelCase__ : Tuple = "none"
@add_end_docstrings(
A_ , r"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : int = ClassificationFunction.NONE
def __init__( self , **A_ ) -> Optional[int]:
super().__init__(**A_ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _a ( self , A_=None , A_=None , A_="" , **A_ ) -> Tuple:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
__UpperCamelCase =tokenizer_kwargs
__UpperCamelCase ={}
if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None:
__UpperCamelCase =self.model.config.return_all_scores
if isinstance(A_ , A_ ) or top_k is None:
__UpperCamelCase =top_k
__UpperCamelCase =False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , A_ , )
if return_all_scores:
__UpperCamelCase =None
else:
__UpperCamelCase =1
if isinstance(A_ , A_ ):
__UpperCamelCase =ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
__UpperCamelCase =function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *A_ , **A_ ) -> Optional[int]:
__UpperCamelCase =super().__call__(*A_ , **A_ )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
__UpperCamelCase ='top_k' not in kwargs
if isinstance(args[0] , A_ ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _a ( self , A_ , **A_ ) -> Dict[str, GenericTensor]:
__UpperCamelCase =self.framework
if isinstance(A_ , A_ ):
return self.tokenizer(**A_ , return_tensors=A_ , **A_ )
elif isinstance(A_ , A_ ) and len(A_ ) == 1 and isinstance(inputs[0] , A_ ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=A_ , **A_ )
elif isinstance(A_ , A_ ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' )
return self.tokenizer(A_ , return_tensors=A_ , **A_ )
def _a ( self , A_ ) -> Union[str, Any]:
return self.model(**A_ )
def _a ( self , A_ , A_=None , A_=1 , A_=True ) -> Optional[Any]:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
__UpperCamelCase =ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
__UpperCamelCase =ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None:
__UpperCamelCase =self.model.config.function_to_apply
else:
__UpperCamelCase =ClassificationFunction.NONE
__UpperCamelCase =model_outputs['logits'][0]
__UpperCamelCase =outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
__UpperCamelCase =sigmoid(A_ )
elif function_to_apply == ClassificationFunction.SOFTMAX:
__UpperCamelCase =softmax(A_ )
elif function_to_apply == ClassificationFunction.NONE:
__UpperCamelCase =outputs
else:
raise ValueError(f'Unrecognized `function_to_apply` argument: {function_to_apply}' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
__UpperCamelCase =[
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(A_ )
]
if not _legacy:
dict_scores.sort(key=lambda A_ : x["score"] , reverse=A_ )
if top_k is not None:
__UpperCamelCase =dict_scores[:top_k]
return dict_scores
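# Hedged standalone sketch (added) of the post-processing math above: subtracting the
# row-wise max before exponentiating avoids overflow without changing the softmax.
def stable_softmax(logits ):
    shifted = logits - np.max(logits , axis=-1 , keepdims=True )
    exp = np.exp(shifted )
    return exp / exp.sum(axis=-1 , keepdims=True )
# stable_softmax(np.array([1000.0, 1001.0]))  # ~ [0.2689, 0.7311], no overflow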
| 682 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser ( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('test' )
    else:
        parser = argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
        '--config_file' , default=None , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
        parser.set_defaults(func=test_command )
return parser
def test_command ( args ):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'--config_file={args.config_file} {script_name}'
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def main ( ):
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
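# Hedged minimal sketch (added) of the argparse dispatch pattern used above
# (`set_defaults(func=...)` binds a handler per subcommand):
# top = argparse.ArgumentParser()
# sub = top.add_subparsers()
# p = sub.add_parser('test')
# p.set_defaults(func=lambda ns: print('running test'))
# ns = top.parse_args(['test']); ns.func(ns)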
| 682 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class lowercase_ (PretrainedConfig ):
    """simple docstring"""
    model_type = 'dpr'
    def __init__( self ,vocab_size=3_0_5_2_2 ,hidden_size=7_6_8 ,num_hidden_layers=1_2 ,num_attention_heads=1_2 ,intermediate_size=3_0_7_2 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_1_2 ,type_vocab_size=2 ,initializer_range=0.0_2 ,layer_norm_eps=1e-1_2 ,pad_token_id=0 ,position_embedding_type="absolute" ,projection_dim = 0 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
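# Hedged usage sketch (added): like any PretrainedConfig subclass, the config above
# round-trips through plain dicts; the projection_dim value is illustrative.
# cfg = lowercase_(projection_dim=128)
# assert lowercase_.from_dict(cfg.to_dict()).projection_dim == 128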
| 41 |
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _snake_case :
'''simple docstring'''
def __init__( self: Any , __UpperCamelCase: List[Any] ) -> Dict:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
__magic_name__ : Optional[int] = deepcopy(__UpperCamelCase )
elif os.path.exists(__UpperCamelCase ):
with io.open(__UpperCamelCase , "r" , encoding="utf-8" ) as f:
__magic_name__ : Optional[int] = json.load(__UpperCamelCase )
else:
try:
__magic_name__ : str = baseaa.urlsafe_baadecode(__UpperCamelCase ).decode("utf-8" )
__magic_name__ : int = json.loads(__UpperCamelCase )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
__magic_name__ : Optional[Any] = config
self.set_stage_and_offload()
def lowerCAmelCase__ ( self: str ) -> Optional[Any]:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
__magic_name__ : List[str] = self.get_value("zero_optimization.stage" , -1 )
# offload
__magic_name__ : Tuple = False
if self.is_zeroa() or self.is_zeroa():
__magic_name__ : List[str] = set(["cpu", "nvme"] )
__magic_name__ : Dict = set(
[
self.get_value("zero_optimization.offload_optimizer.device" ),
self.get_value("zero_optimization.offload_param.device" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
__magic_name__ : List[str] = True
def lowerCAmelCase__ ( self: Optional[Any] , __UpperCamelCase: str ) -> Optional[int]:
__magic_name__ : Tuple = self.config
# find the config node of interest if it exists
__magic_name__ : int = ds_key_long.split("." )
__magic_name__ : List[Any] = nodes.pop()
for node in nodes:
__magic_name__ : List[Any] = config.get(__UpperCamelCase )
if config is None:
return None, ds_key
return config, ds_key
def lowerCAmelCase__ ( self: str , __UpperCamelCase: Dict , __UpperCamelCase: int=None ) -> Union[str, Any]:
__magic_name__ , __magic_name__ : int = self.find_config_node(__UpperCamelCase )
if config is None:
return default
return config.get(__UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self: str , __UpperCamelCase: Optional[int] , __UpperCamelCase: Tuple=False ) -> Tuple:
__magic_name__ : List[str] = self.config
# find the config node of interest if it exists
__magic_name__ : Any = ds_key_long.split("." )
for node in nodes:
__magic_name__ : Dict = config
__magic_name__ : Union[str, Any] = config.get(__UpperCamelCase )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__UpperCamelCase )
def lowerCAmelCase__ ( self: Dict , __UpperCamelCase: Optional[Any] ) -> List[Any]:
__magic_name__ : List[Any] = self.get_value(__UpperCamelCase )
return False if value is None else bool(__UpperCamelCase )
def lowerCAmelCase__ ( self: Dict , __UpperCamelCase: List[str] ) -> Tuple:
__magic_name__ : List[Any] = self.get_value(__UpperCamelCase )
return False if value is None else not bool(__UpperCamelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
return self._stage == 2
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[Any]:
return self._stage == 3
def lowerCAmelCase__ ( self: Union[str, Any] ) -> str:
return self._offload
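# Hedged standalone sketch (added) of the dotted-path lookup that get_value /
# find_config_node implement above; `dig` is an illustrative name.
def dig(config , ds_key_long , default=None ):
    node = config
    for key in ds_key_long.split("." ):
        if not isinstance(node , dict ) or key not in node:
            return default
        node = node[key]
    return node
# dig({"zero_optimization": {"stage": 3}} , "zero_optimization.stage" )  # -> 3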
class _snake_case :
'''simple docstring'''
def __init__( self: List[str] , __UpperCamelCase: Union[str, Any] ) -> Tuple:
__magic_name__ : Tuple = engine
def lowerCAmelCase__ ( self: Optional[int] , __UpperCamelCase: int , **__UpperCamelCase: Union[str, Any] ) -> Tuple:
# runs backpropagation and handles mixed precision
self.engine.backward(__UpperCamelCase , **__UpperCamelCase )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _snake_case ( snake_case_ ):
'''simple docstring'''
def __init__( self: List[str] , __UpperCamelCase: Optional[int] ) -> List[Any]:
super().__init__(__UpperCamelCase , device_placement=__UpperCamelCase , scaler=__UpperCamelCase )
__magic_name__ : Any = hasattr(self.optimizer , "overflow" )
def lowerCAmelCase__ ( self: List[str] , __UpperCamelCase: List[str]=None ) -> Union[str, Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _snake_case ( snake_case_ ):
'''simple docstring'''
def __init__( self: List[Any] , __UpperCamelCase: str , __UpperCamelCase: Dict ) -> Any:
super().__init__(__UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self: int ) -> Dict:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _snake_case :
'''simple docstring'''
def __init__( self: Optional[Any] , __UpperCamelCase: Optional[Any] , __UpperCamelCase: str=0.0_0_1 , __UpperCamelCase: List[Any]=0 , **__UpperCamelCase: List[str] ) -> Union[str, Any]:
__magic_name__ : List[Any] = params
__magic_name__ : List[Any] = lr
__magic_name__ : List[str] = weight_decay
__magic_name__ : Any = kwargs
class _snake_case :
'''simple docstring'''
def __init__( self: List[str] , __UpperCamelCase: List[Any] , __UpperCamelCase: int=None , __UpperCamelCase: int=0 , **__UpperCamelCase: Optional[Any] ) -> str:
__magic_name__ : Optional[int] = optimizer
__magic_name__ : Any = total_num_steps
__magic_name__ : int = warmup_num_steps
        __magic_name__ : Dict = kwargs
| 436 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
# print entropy
print(f"{round(-1 * my_fir_sum ):.1f}" )
# two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
# print second entropy
print(f"{round(-1 * my_sec_sum ):.1f}" )
# print the difference between them
print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
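
# A small worked check (illustrative input, not from the original file): for the
# text "aab", analyze_text counts single characters {"a": 2, "b": 1} and pairs
# {" a": 1, "aa": 1, "ab": 1}, so the one-letter entropy is
# -(2/3 * log2(2/3) + 1/3 * log2(1/3)), about 0.92 bits.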
| 710 |
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    # a valid IPv4 address has exactly four octets, each in the range 0-255
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f"{ip} is a {valid_or_invalid} IP v4 address.")
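
# Illustrative checks (hypothetical inputs, assuming the helper above):
#   is_ip_va_address_valid("192.168.0.23")  -> True
#   is_ip_va_address_valid("192.168.256.1") -> False  (octet out of range)
#   is_ip_va_address_valid("12.4.1")        -> False  (only three octets)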
| 325 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
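
    # Sketch of the layouts the two methods above produce (token IDs are
    # illustrative, not actual vocabulary entries):
    #   single sequence: [CLS] A [SEP]          -> token_type_ids: 0 0 ... 0
    #   sequence pair:   [CLS] A [SEP] B [SEP]  -> token_type_ids: 0 ... 0 1 ... 1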
| 410 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
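
# Quick illustration (hypothetical data): normalization([2, 4, 6]) rescales to
# [0.0, 0.5, 1.0], while standardization([2, 4, 6]) centres on the mean (4) and
# divides by the sample standard deviation (2), giving [-1.0, 0.0, 1.0].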
| 248 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
'''simple docstring'''
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()
    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")
    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
@classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output
def __repr__( self ):
return f"{self.__class__.__name__} {self.to_json_string()}"
    def to_json_string(self, use_diff=True):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self):
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
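
# Example (a sketch assuming bitsandbytes>=0.39.0 and a CUDA build of torch):
# configure 4-bit NF4 quantization with bfloat16 compute and round-trip it to JSON.
#
#   quant_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype="bfloat16",
#       bnb_4bit_use_double_quant=True,
#   )
#   assert quant_config.quantization_method() == "nf4"
#   quant_config.to_json_file("quant_config.json")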
| 441 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
SCREAMING_SNAKE_CASE : int = False
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self ):
return 12
@property
def UpperCamelCase ( self ):
return 12
@property
def UpperCamelCase ( self ):
return 32
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCamelCase_ )
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :int = 12
lowercase_ :List[Any] = 12
lowercase_ :Dict = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
lowercase_ :int = TransformeraDModel(**UpperCamelCase_ )
return model
def UpperCamelCase ( self ):
lowercase_ :List[str] = '''cpu'''
lowercase_ :int = self.dummy_vqvae
lowercase_ :int = self.dummy_text_encoder
lowercase_ :Any = self.dummy_tokenizer
lowercase_ :Optional[int] = self.dummy_transformer
lowercase_ :List[str] = VQDiffusionScheduler(self.num_embed )
lowercase_ :int = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase_ )
lowercase_ :List[Any] = VQDiffusionPipeline(
vqvae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , transformer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
lowercase_ :Union[str, Any] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :Dict = '''teddy bear playing in the pool'''
lowercase_ :Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' )
lowercase_ :Any = output.images
lowercase_ :Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :str = pipe(
[prompt] , generator=UpperCamelCase_ , output_type='''np''' , return_dict=UpperCamelCase_ , num_inference_steps=2 )[0]
lowercase_ :Optional[Any] = image[0, -3:, -3:, -1]
lowercase_ :Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowercase_ :str = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :int = '''cpu'''
lowercase_ :Dict = self.dummy_vqvae
lowercase_ :str = self.dummy_text_encoder
lowercase_ :List[Any] = self.dummy_tokenizer
lowercase_ :Any = self.dummy_transformer
lowercase_ :Optional[Any] = VQDiffusionScheduler(self.num_embed )
lowercase_ :List[str] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCamelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowercase_ :Optional[int] = VQDiffusionPipeline(
vqvae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , transformer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
lowercase_ :Dict = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :int = '''teddy bear playing in the pool'''
lowercase_ :Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' )
lowercase_ :Optional[Any] = output.images
lowercase_ :Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Dict = pipe(
[prompt] , generator=UpperCamelCase_ , output_type='''np''' , return_dict=UpperCamelCase_ , num_inference_steps=2 )[0]
lowercase_ :List[str] = image[0, -3:, -3:, -1]
lowercase_ :Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowercase_ :Dict = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
lowercase_ :List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
lowercase_ :int = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
lowercase_ :Tuple = pipeline.to(UpperCamelCase_ )
pipeline.set_progress_bar_config(disable=UpperCamelCase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowercase_ :Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :int = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase_ :List[str] = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 441 | 1 |
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
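
    # A second, smaller check (illustrative): vertices 0-1-2 form a directed
    # cycle and hence one strongly connected component; vertex 3 sits alone.
    g2 = create_graph(4, [(0, 1), (1, 2), (2, 0), (2, 3)])
    assert tarjan(g2) == [[3], [2, 1, 0]]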
| 33 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="last" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
snake_case_ : List[str] = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[Any] = seq_length
snake_case_ : Optional[int] = is_training
snake_case_ : List[Any] = use_input_lengths
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Optional[Any] = use_labels
snake_case_ : Tuple = gelu_activation
snake_case_ : Optional[Any] = sinusoidal_embeddings
snake_case_ : str = causal
snake_case_ : Dict = asm
snake_case_ : Optional[Any] = n_langs
snake_case_ : Optional[int] = vocab_size
snake_case_ : Tuple = n_special
snake_case_ : int = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : Tuple = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Optional[int] = type_sequence_label_size
snake_case_ : List[str] = initializer_range
snake_case_ : str = num_labels
snake_case_ : Optional[int] = num_choices
snake_case_ : List[Any] = summary_type
snake_case_ : Optional[int] = use_proj
snake_case_ : Union[str, Any] = scope
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Union[str, Any] = None
if self.use_input_lengths:
snake_case_ : List[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case_ : Any = None
snake_case_ : Union[str, Any] = None
snake_case_ : Dict = None
if self.use_labels:
snake_case_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self ) -> List[str]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[str]:
snake_case_ : List[Any] = FlaubertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Optional[int] = model(_SCREAMING_SNAKE_CASE , lengths=_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = model(_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Tuple:
snake_case_ : List[Any] = FlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
snake_case_ : Optional[Any] = FlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Tuple = model(_SCREAMING_SNAKE_CASE )
snake_case_ : int = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[Any]:
snake_case_ : int = FlaubertForQuestionAnswering(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Tuple = model(_SCREAMING_SNAKE_CASE )
snake_case_ : Any = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , p_mask=_SCREAMING_SNAKE_CASE , )
snake_case_ : Dict = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , )
((snake_case_) , ) : List[str] = result_with_labels.to_tuple()
snake_case_ : Optional[int] = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
((snake_case_) , ) : Tuple = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Tuple:
snake_case_ : Tuple = FlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Dict:
snake_case_ : int = self.num_labels
snake_case_ : List[str] = FlaubertForTokenClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Dict:
snake_case_ : str = self.num_choices
snake_case_ : Any = FlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : int = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : Dict = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : str = config_and_inputs
snake_case_ : List[str] = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
A : Tuple = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict:
snake_case_ : Tuple = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
snake_case_ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : Optional[Any] = FlaubertModelTester(self )
snake_case_ : Dict = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , emb_dim=37 )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def _lowerCAmelCase ( self ) -> List[str]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[int] = FlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ , snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
snake_case_ : Optional[int] = True
snake_case_ : str = model_class(config=_SCREAMING_SNAKE_CASE )
snake_case_ : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : int = torch.jit.trace(
_SCREAMING_SNAKE_CASE , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , "traced_model.pt" ) )
snake_case_ : Tuple = torch.jit.load(os.path.join(_SCREAMING_SNAKE_CASE , "traced_model.pt" ) , map_location=_SCREAMING_SNAKE_CASE )
loaded(inputs_dict["input_ids"].to(_SCREAMING_SNAKE_CASE ) , inputs_dict["attention_mask"].to(_SCREAMING_SNAKE_CASE ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Optional[Any] = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
snake_case_ : List[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )[0]
snake_case_ : List[Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 568 | 0 |
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
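
# Usage sketch (hypothetical path, assuming the helpers above):
#   record = get_size_checksum_dict("/tmp/train.csv")
#   # -> {"num_bytes": <size of the file>, "checksum": "<sha256 hex digest>"}
#   verify_checksums({"url": record}, {"url": record})  # logs a success message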
| 680 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 680 | 1 |
"""simple docstring"""
def gnome_sort(lst: list) -> list:
    """simple docstring"""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
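
    # Worked example (illustrative): gnome sort steps back after every swap, so
    # [3, 1, 2] becomes [1, 3, 2], rewinds, then becomes [1, 2, 3].
    assert gnome_sort([3, 1, 2]) == [1, 2, 3]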
| 218 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
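
# Worked example (illustrative): for [[2, 5], [2, 0]] the determinant is
# 2 * 0 - 2 * 5 = -10, so the function returns the swapped, scaled matrix:
#   inverse_of_matrix([[2, 5], [2, 0]]) -> [[0.0, 0.5], [0.2, -0.2]]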
| 218 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class A ( unittest.TestCase ):
'''simple docstring'''
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : str = 1_0
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Optional[int] = [1, 2, 3, 4]
__UpperCamelCase : Tuple = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def a_ (self ) -> str:
__UpperCamelCase : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
__UpperCamelCase : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def a_ (self ) -> int:
__UpperCamelCase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
__UpperCamelCase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Dict = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
__UpperCamelCase , __UpperCamelCase : List[Any] = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
def a_ (self ) -> Dict:
__UpperCamelCase : Tuple = ""
__UpperCamelCase , __UpperCamelCase : Tuple = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
self.assertEqual(_UpperCAmelCase , [] )
def a_ (self ) -> List[Any]:
__UpperCamelCase : Tuple = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
__UpperCamelCase , __UpperCamelCase : Optional[Any] = process_story(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : str = ["It was the best of times."]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def a_ (self ) -> List[Any]:
__UpperCamelCase : str = torch.tensor([1, 2, 3, 4] )
__UpperCamelCase : Any = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def a_ (self ) -> Dict:
__UpperCamelCase : Any = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
__UpperCamelCase : List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 2_3 ).numpy() , expected.numpy() )
def a_ (self ) -> List[Any]:
__UpperCamelCase : Union[str, Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__UpperCamelCase : List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : Optional[Any] = 1_0_1
__UpperCamelCase : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
__UpperCamelCase : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__UpperCamelCase : Dict = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
| 399 |
'''simple docstring'''
def capitalize_variants(txt: str) -> list[str]:
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
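
# Illustrative behaviour (the function name above is an editorial placeholder;
# the original identifier was lost): each alphabetic position is uppercased in
# turn, e.g. capitalize_variants("abc") -> ["Abc", "aBc", "abC"].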
| 399 | 1 |
'''simple docstring'''
def check_cycle(graph: dict) -> bool:
    '''simple docstring'''
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    '''simple docstring'''
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
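
# Illustrative checks (hypothetical graphs): the first graph has a back edge
# 2 -> 0 and therefore a cycle; the second is acyclic.
#   check_cycle({0: [1], 1: [2], 2: [0]}) -> True
#   check_cycle({0: [1], 1: [2], 2: []})  -> False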
| 251 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 400 | 0 |
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    '''simple docstring'''
    words = word.split()

    def justify(line, width, max_width) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
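
# Illustrative call (hypothetical input): with max_width=16,
#   text_justification("This is an example of text justification.", 16)
# returns ['This    is    an', 'example  of text', 'justification.  ']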
| 718 |
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE__ = os.path.join(git_repo_path, "src", "transformers")
SCREAMING_SNAKE_CASE__ = "\n{0} = None\n"
SCREAMING_SNAKE_CASE__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
SCREAMING_SNAKE_CASE__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 393 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
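
# Hedged usage sketch (hypothetical sizes, just to exercise the knobs above):
#     tiny = BertConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2,
#                       num_attention_heads=2, intermediate_size=256)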
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 40 |
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
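
# Hedged usage sketch: the result is stochastic (random initial weight), but with
# enough propagations it converges toward `expected`; e.g. something like
#     res = forward_propagation(32, 450_000)
# should usually satisfy 31 < res < 33.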
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 40 | 1 |
import cmath
import math
def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """simple docstring"""
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
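
# Hedged worked example: 100 V and 5 A, both at 0 degrees phase, gives
#     apparent_power(100, 5, 0, 0) == (500+0j)   # volt-amperes
# since cmath.rect(100, 0) * cmath.rect(5, 0) == 500.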
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 252 | 0 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2_048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
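
    # Hedged note on `decode`/`truncate` below: `truncate` keeps at most one
    # top-level `print` block and one top-level `def`, then cuts at the earliest
    # match of any regex supplied via `truncate_before_pattern` (e.g. r"^#" or
    # r"^'''"), which is a common recipe when sampling code completions.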
    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern=None,
        **kwargs,
    ) -> str:
        """simple docstring"""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """simple docstring"""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
 | 516 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """simple docstring"""
    # Split the dataset bunch into features and target arrays
    return (data["data"], data["target"])


def xgboost(
    features: np.ndarray, target: np.ndarray, test_features: np.ndarray
) -> np.ndarray:
    """simple docstring"""
    xgb = XGBRegressor(verbosity=0, random_state=4_2)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    """simple docstring"""
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 421 | 0 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    """simple docstring"""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        """simple docstring"""
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        """simple docstring"""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        """simple docstring"""
        raise NotImplementedError

    def run_tf(self):
        """simple docstring"""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 613 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
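
# Hedged usage sketch (path is hypothetical):
#     write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_default.json")
# returns the written Path on success, or False if a config file already exists there.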
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )

    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )

    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 613 | 1 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
a__ : Dict = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
a__ : List[str] = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
a__ : int = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 589 |
"""simple docstring"""
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` with the repeated key; spaces pass through unchanged."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 2_6
            i += 1
            encrypted += dictb[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt cipher text produced by `cipher_text`."""
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 2_6) % 2_6
            i += 1
            or_txt += dictb[x]
    return or_txt
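
# Hedged worked example for the helpers above: with message "THE GERMAN ATTACK"
# and key "SECRET", generate_key(...) yields "SECRETSECRETSECRE",
# cipher_text(...) yields "BDC PAYUWL JPAIYI", and original_text(...) round-trips it.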
def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 589 | 1 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
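
# Hedged sketch of one record this loop expects from biencoder-nq-dev.json:
#     {"question": "...", "positive_ctxs": [{"title": "...", "text": "..."}, ...]}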
if __name__ == "__main__":
    main()
 | 386 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        '''simple docstring'''
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        '''simple docstring'''
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        '''simple docstring'''
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
 | 386 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 209 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1_088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
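
# Hedged note: Keras Conv2D on CPU only supports channels-last, hence the
# NCHW -> NHWC transpose in TFRegNetEmbeddings above; TFRegNetMainLayer below
# transposes outputs back to NCHW so the public API stays channels-first.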
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ", REGNET_START_DOCSTRING, )
class TFRegNetForImageClassification ( TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
    def __init__( self , config : RegNetConfig , *inputs , **kwargs ):
        super().__init__(config , *inputs , **kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config , name="regnet" )
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def call( self , pixel_values : tf.Tensor = None , labels : tf.Tensor = None , output_hidden_states = None , return_dict = None , training = False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output )
        logits = self.classifier[1](flattened_output )
        loss = None if labels is None else self.hf_compute_loss(labels=labels , logits=logits )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
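

# Minimal usage sketch for the classifier above (checkpoint name is illustrative,
# not asserted by this file; `image` is assumed to be a PIL image):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     logits = model(**inputs).logits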
| 153 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester ( unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
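

# Note on the "+ 2" used in the shape checks below: each flattened Pix2Struct
# patch stores its (row, column) position in the first two slots, followed by
# patch_height * patch_width * num_channels pixel values.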
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires a header text
            with self.assertRaises(ValueError):
                image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (converting to RGB drops the extra channel)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 495 |
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for every a in [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
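
# Cross-check sketch (assuming this implements Project Euler 120, where the
# maximum of ((a - 1) ** n + (a + 1) ** n) % a ** 2 over n equals
# 2 * a * ((a - 1) // 2) for a >= 3):
#
#     def brute_force(limit: int) -> int:
#         return sum(
#             max(((a - 1) ** n + (a + 1) ** n) % a**2 for n in range(1, 2 * a + 1))
#             for a in range(3, limit + 1)
#         )
#
#     assert brute_force(50) == solution(50)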
| 495 | 1 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest ( TestCase ):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint (no model files) -> FileNotFoundError
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
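
# To run just this suite (test path is illustrative):
#   python -m pytest tests/onnx/test_features.py -k DetermineFramework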
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    # Greedy strategy: sort items by value/weight ratio, take whole items while
    # they fit, then a fraction of the first item that does not.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
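
    # A small worked example: with capacity 50, items 1 and 2 (ratios 6 and 5)
    # are taken whole and two thirds of item 3, for a total value of 240.0.
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))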
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    """Count the triangle words in words.txt (Project Euler problem 42)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution()) | 307 |
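
# Worked example from the problem statement: "SKY" -> 19 + 11 + 25 = 55 = t_10,
# the tenth triangular number, so "SKY" counts as a triangle word.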
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer ( nn.Module ):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings ( nn.Module ):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut ( nn.Module ):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer ( nn.Module ):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer ( nn.Module ):
    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage ( nn.Module ):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder ( nn.Module ):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel ( PreTrainedModel ):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
RESNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel ( ResNetPreTrainedModel ):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification ( ResNetPreTrainedModel ):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone ( ResNetPreTrainedModel , BackboneMixin ):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
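

# Minimal usage sketch mirroring the doc sample checkpoints above (`image` is
# assumed to be a PIL image):
#
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#     import torch
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tiger cat"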
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
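
# Illustrative effect of the lazy module above: `from transformers.models.mvp import MvpModel`
# resolves `modeling_mvp` (and its torch dependency) only on first attribute access.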
| 225 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 225 | 1 |
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Sort ``arr`` by repeatedly extracting monotone "strands" and merging them."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
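    # Additional illustrative checks: trivial and already-sorted inputs.
    assert strand_sort([7]) == [7]
    assert strand_sort([1, 2, 3, 4]) == [1, 2, 3, 4]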
| 719 |
'''simple docstring'''
import torch
def main() -> None:
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
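
# Smoke-test style script: reports how many CUDA devices the current process can
# see, e.g. (illustrative) `CUDA_VISIBLE_DEVICES=0,1 python script.py` prints
# "Successfully ran on 2 GPUs".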
| 514 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger('''transformers.models.speecht5''')
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
) | 642 |
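
# Example invocation (script and file names are placeholders):
#   python convert_speecht5_checkpoint.py --task t2s --checkpoint_path speecht5_tts.pt \
#       --vocab_path spm_char.model --pytorch_dump_folder_path ./speecht5_tts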
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler ( ArgumentHandler ):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline ( ChunkPipeline ):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
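

# Minimal usage sketch of this pipeline (checkpoint name is illustrative):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"])
#     # -> {"sequence": ..., "labels": ["travel", ...], "scores": [...]}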
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase ( lowercase_ : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : int ) -> List[str]:
'''simple docstring'''
lowercase =[]
lowercase , lowercase =input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
lowercase =result + left + right
return input_list
def iter_merge_sort(input_list: list) -> list:
    """Sort ``input_list`` with an iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
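# A quick behavioural check of the two functions above:
#
#   iter_merge_sort([5, 9, 8, 7, 1, 2, 7])  # -> [1, 2, 5, 7, 7, 8, 9]
#
# The bottom-up strategy merges runs of width 2, 4, 8, ... so the sort runs in O(n log n)
# time with O(n) auxiliary space and no recursion.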
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 714 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload decodes
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
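# Example invocation (a sketch; the prompt and paths are made up, and `clip-retrieval` must be
# installed for the LAION kNN client to work):
#
#   python retrieve.py --class_prompt "a photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200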
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 145 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
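# Pattern note: `DummyObject` turns attribute access into a `requires_backends` check, so
# importing this placeholder still succeeds when the optional `note_seq` dependency is missing,
# and a helpful ImportError is raised only at first use.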
| 476 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
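        # MPS kernels are not bit-exact with CPU/CUDA, which is why the tolerance above widens
        # from 1e-2 to 3e-2 when `torch_device == "mps"`.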
| 476 | 1 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # only local rank 0 keeps a live progress bar; every other process disables it
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
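# Illustrative use (a sketch; assumes an `accelerate`-managed multi-process run):
#
#   for batch in tqdm(dataloader, desc="train"):
#       ...
#
# With `main_process_only=True` the bar renders once, on local rank 0, instead of once per process.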
| 312 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 312 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda r: version.Version(r))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check the module has no missing top-level packages, then return its relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import ``module_path`` from the dynamic modules cache and return the requested class."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False,
    resume_download: bool = False, proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None,
    local_files_only: bool = False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download,
                proxies=proxies, resume_download=resume_download, local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir,
                    force_download=force_download, resume_download=resume_download, proxies=proxies,
                    use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False,
    resume_download: bool = False, proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None,
    local_files_only: bool = False, **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
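# Illustrative call (a sketch; "clip_guided_stable_diffusion" names one of the community pipelines
# under examples/community in the diffusers repo):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
#   )
#
# With `class_name=None`, `find_pipeline_class` picks out the single user-defined
# DiffusionPipeline subclass from the downloaded module.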
| 253 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8,
            addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift", act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8,
            addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep",
            mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
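    # `AttnAddedKVProcessor` is re-set on the reloaded UNet so attention runs through the same
    # deterministic code path before and after serialization; the 1e-4 bound then checks that
    # `save_pretrained`/`from_pretrained` round-trips are numerically faithful.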
| 253 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
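    # The three call tests above assert the same
    # (batch, num_frames, num_channels, crop_height, crop_width) output contract for PIL, NumPy
    # and PyTorch inputs, so a regression in any single input path surfaces as a shape mismatch.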
| 491 |
"""simple docstring"""
def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring of ``input_string`` in O(n) time (Manacher's algorithm)."""
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
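# For example, palindromic_string("abbbaba") == "abbba". The "a|b|b|b|a|b|a" interleaving lets
# even- and odd-length palindromes be treated uniformly, and reusing mirrored lengths inside the
# current right boundary keeps the whole scan linear in len(input_string).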
if __name__ == "__main__":
import doctest
doctest.testmod()
| 491 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
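# Quick sanity check (a sketch):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   assert config.model_type == "mobilenet_v1"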
| 22 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reverse the byte order of a 32-char chunk (four 8-char groups)."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format ``i`` as eight little-endian hex digits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Convert ``message`` to a bit string and pad it to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes):
    """Split ``bit_string`` into 512-char blocks and yield each as 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT of a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by ``shift`` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of ``message`` as 32 lowercase hex bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
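# Known-answer check from RFC 1321 (the digest comes back as lowercase hex bytes):
#
#   md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"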
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 1 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
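# Building the tree costs O(n); update() and query_range() then each touch O(log n) nodes.
# The demo below simply rebuilds the tree once per aggregation function (sum, max, min).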
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 674 | """simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
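# Note: despite the class name, the mapping computed in `stretch` is the classic
# histogram-equalization transform s_k = (L - 1) * CDF(k) over L = 256 grey levels,
# rounded to the nearest integer per level.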
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 674 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql,
        con,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode,
            verification_mode=verification_mode, base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch
        return written
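# Typical round trip (a sketch; the table and database names are made up):
#
#   ds = Dataset.from_dict({"a": [1, 2]})
#   SqlDatasetWriter(ds, "my_table", "sqlite:///my.db").write()
#   ds2 = SqlDatasetReader("SELECT * FROM my_table", "sqlite:///my.db").read()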
| 100 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'facebook/sam-vit-huge'
_lowerCAmelCase = pipeline('mask-generation' , model=__magic_name__ )
_lowerCAmelCase = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=2_5_6 )
# Shortening by hashing
_lowerCAmelCase = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(__magic_name__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.00_53},
] , )
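# Added sketch (not part of the original tests): the slow test above boils down
# to the following standalone usage. The checkpoint id and image URL are taken
# from the test itself; running this downloads a large SAM checkpoint and is
# only practical on a GPU, so it is left as a commented sketch.
#
#   from transformers import pipeline
#
#   segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
#   outputs = segmenter(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       points_per_batch=256,
#   )
#   # outputs["masks"] holds boolean masks; outputs["scores"] holds the
#   # per-mask quality scores that the assertions above compare after hashing.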
| 589 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : str , lowerCamelCase : str = "cpu" , lowerCamelCase : str = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_UpperCAmelCase = device
_UpperCAmelCase = CLIPTokenizerFast.from_pretrained(lowerCamelCase )
_UpperCAmelCase = [0.4814_5466, 0.457_8275, 0.4082_1073]
_UpperCAmelCase = [0.2686_2954, 0.2613_0258, 0.2757_7711]
_UpperCAmelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_UpperCAmelCase = torchvision.transforms.Resize(224 )
_UpperCAmelCase = torchvision.transforms.CenterCrop(224 )
def lowerCamelCase ( self : Dict , lowerCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.resize(lowerCamelCase )
_UpperCAmelCase = self.center_crop(lowerCamelCase )
_UpperCAmelCase = self.normalize(lowerCamelCase )
return images
def __call__( self : List[str] , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : List[str]=None , **lowerCamelCase : int ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer(text=lowerCamelCase , **lowerCamelCase )
_UpperCAmelCase = self.preprocess_img(lowerCamelCase )
_UpperCAmelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
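# --- Added illustration (a sketch, not part of the original file) ---
# The processor above exists so that CLIP preprocessing stays differentiable:
# torchvision transforms applied to tensors keep the autograd graph, unlike
# preprocessing that round-trips through PIL. The self-contained check below
# uses the same CLIP mean/std constants as the class; all names are local to
# the sketch.
def _demo_differentiable_preprocess():
    import torch
    import torchvision

    resize = torchvision.transforms.Resize(224)
    center_crop = torchvision.transforms.CenterCrop(224)
    normalize = torchvision.transforms.Normalize(
        [0.48145466, 0.4578275, 0.40821073],
        [0.26862954, 0.26130258, 0.27577711],
    )
    pixels = torch.rand(1, 3, 256, 256, requires_grad=True)
    out = normalize(center_crop(resize(pixels)))
    out.sum().backward()  # gradients flow back to the input pixels
    assert pixels.grad is not None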
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , lowerCamelCase : Dict=10 , lowerCamelCase : Tuple=0.01 , lowerCamelCase : int=None , lowerCamelCase : Optional[int]=None , lowerCamelCase : List[str]=None , lowerCamelCase : str=None , lowerCamelCase : Tuple=None , lowerCamelCase : List[str]=None , lowerCamelCase : str=False , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : int="image" , lowerCamelCase : str=True , lowerCamelCase : int=False , lowerCamelCase : Dict=False , lowerCamelCase : Union[str, Any]=False , ) -> None:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = None
_UpperCAmelCase = device if device else get_device()
if vqgan:
_UpperCAmelCase = vqgan
else:
_UpperCAmelCase = load_vqgan(self.device , conf_path=lowerCamelCase , ckpt_path=lowerCamelCase )
self.vqgan.eval()
if clip:
_UpperCAmelCase = clip
else:
_UpperCAmelCase = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_UpperCAmelCase = ProcessorGradientFlow(device=self.device )
_UpperCAmelCase = iterations
_UpperCAmelCase = lr
_UpperCAmelCase = log
_UpperCAmelCase = make_grid
_UpperCAmelCase = return_val
_UpperCAmelCase = quantize
_UpperCAmelCase = self.vqgan.decoder.z_shape
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : int=None , lowerCamelCase : List[Any]=None , lowerCamelCase : Optional[Any]=5 , lowerCamelCase : Dict=True ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
if output_path is None:
_UpperCAmelCase = """./animation.gif"""
if input_path is None:
_UpperCAmelCase = self.save_path
_UpperCAmelCase = sorted(glob(input_path + """/*""" ) )
if not len(lowerCamelCase ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(lowerCamelCase ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_UpperCAmelCase = total_duration / len(lowerCamelCase )
_UpperCAmelCase = [frame_duration] * len(lowerCamelCase )
if extend_frames:
_UpperCAmelCase = 1.5
_UpperCAmelCase = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(lowerCamelCase ) )
imageio.mimsave(lowerCamelCase , lowerCamelCase , duration=lowerCamelCase )
print(f"""gif saved to {output_path}""" )
def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=None ) -> Tuple:
"""simple docstring"""
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_UpperCAmelCase = preprocess(Image.open(lowerCamelCase ) , target_image_size=256 ).to(self.device )
_UpperCAmelCase = preprocess_vqgan(lowerCamelCase )
_UpperCAmelCase , *_UpperCAmelCase = self.vqgan.encode(lowerCamelCase )
return z
def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : Any ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.latent.detach().requires_grad_()
_UpperCAmelCase = base_latent + transform_vector
if self.quantize:
_UpperCAmelCase , *_UpperCAmelCase = self.vqgan.quantize(lowerCamelCase )
else:
_UpperCAmelCase = trans_latent
return self.vqgan.decode(lowerCamelCase )
def lowerCamelCase ( self : Tuple , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : str=None ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.clip_preprocessor(text=lowerCamelCase , images=lowerCamelCase , return_tensors="""pt""" , padding=lowerCamelCase )
_UpperCAmelCase = self.clip(**lowerCamelCase )
_UpperCAmelCase = clip_outputs.logits_per_image
if weights is not None:
_UpperCAmelCase = similarity_logits * weights
return similarity_logits.sum()
def lowerCamelCase ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : str ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self._get_clip_similarity(pos_prompts["""prompts"""] , lowerCamelCase , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_UpperCAmelCase = self._get_clip_similarity(neg_prompts["""prompts"""] , lowerCamelCase , weights=neg_prompts["""weights"""] )
else:
_UpperCAmelCase = torch.tensor([1] , device=self.device )
_UpperCAmelCase = -torch.log(lowerCamelCase ) + torch.log(lowerCamelCase )
return loss
def lowerCamelCase ( self : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = torch.randn_like(self.latent , requires_grad=lowerCamelCase , device=self.device )
_UpperCAmelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_UpperCAmelCase = self._add_vector(lowerCamelCase )
_UpperCAmelCase = loop_post_process(lowerCamelCase )
_UpperCAmelCase = self._get_CLIP_loss(lowerCamelCase , lowerCamelCase , lowerCamelCase )
print("""CLIP loss""" , lowerCamelCase )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=lowerCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCamelCase ( self : str , lowerCamelCase : str , lowerCamelCase : List[str] , lowerCamelCase : str ) -> str:
"""simple docstring"""
wandb.init(reinit=lowerCamelCase , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_UpperCAmelCase = Image.open(lowerCamelCase )
_UpperCAmelCase = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(lowerCamelCase ) )
def lowerCamelCase ( self : Dict , lowerCamelCase : Union[str, Any] ) -> int:
"""simple docstring"""
if not prompts:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if isinstance(lowerCamelCase , lowerCamelCase ):
_UpperCAmelCase = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(lowerCamelCase , (tuple, list) ):
_UpperCAmelCase = prompt[0]
_UpperCAmelCase = float(prompt[1] )
elif ":" in prompt:
_UpperCAmelCase , _UpperCAmelCase = prompt.split(""":""" )
_UpperCAmelCase = float(lowerCamelCase )
else:
_UpperCAmelCase = prompt
_UpperCAmelCase = 1.0
processed_prompts.append(lowerCamelCase )
weights.append(lowerCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCamelCase , device=self.device ),
}
def lowerCamelCase ( self : List[str] , lowerCamelCase : int , lowerCamelCase : Dict=None , lowerCamelCase : int=None , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Optional[int]=False , lowerCamelCase : Any=True , lowerCamelCase : Optional[int]=True , lowerCamelCase : Tuple=None , ) -> int:
"""simple docstring"""
if image_path:
_UpperCAmelCase = self._get_latent(lowerCamelCase )
else:
_UpperCAmelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowerCamelCase , lowerCamelCase , lowerCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
_UpperCAmelCase = self.process_prompts(lowerCamelCase )
_UpperCAmelCase = self.process_prompts(lowerCamelCase )
if save_final and save_path is None:
_UpperCAmelCase = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(lowerCamelCase ):
os.makedirs(lowerCamelCase )
else:
_UpperCAmelCase = save_path + """_""" + get_timestamp()
os.makedirs(lowerCamelCase )
_UpperCAmelCase = save_path
_UpperCAmelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(lowerCamelCase ) )
_UpperCAmelCase = loop_post_process(lowerCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCamelCase , lowerCamelCase , lowerCamelCase ) ):
if show_intermediate:
show_pil(lowerCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(lowerCamelCase )} )
if show_final:
show_pil(lowerCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) ) | 402 |
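# Added sketch (not part of the original file): a hypothetical end-to-end call
# of the editor class above (VQGAN_CLIP in the original research project; this
# listing strips the name). `generate` optimizes a latent offset with Adam so
# that CLIP similarity to the positive prompts rises and similarity to the
# negative prompts falls, decoding through VQGAN at every step.
#
#   editor = VQGAN_CLIP(iterations=10, lr=0.01, log=False)
#   editor.generate(
#       pos_prompts="a smiling face:1.0|bright studio lighting:0.5",
#       neg_prompts="blurry:1.0",
#       image_path="face.png",
#       show_intermediate=False,
#   )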
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__a: Optional[Any] = logging.get_logger(__name__)
__a: str = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = '''bit'''
_lowerCamelCase = ['''preactivation''', '''bottleneck''']
_lowerCamelCase = ['''SAME''', '''VALID''']
def __init__( self : List[Any] , lowerCamelCase : Dict=3 , lowerCamelCase : str=64 , lowerCamelCase : Union[str, Any]=[256, 512, 1024, 2048] , lowerCamelCase : Union[str, Any]=[3, 4, 6, 3] , lowerCamelCase : Optional[int]="preactivation" , lowerCamelCase : Optional[int]="relu" , lowerCamelCase : Optional[int]=None , lowerCamelCase : List[Any]=32 , lowerCamelCase : Tuple=0.0 , lowerCamelCase : Optional[int]=False , lowerCamelCase : Any=32 , lowerCamelCase : Tuple=1 , lowerCamelCase : Optional[int]=None , lowerCamelCase : str=None , **lowerCamelCase : Any , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_UpperCAmelCase = global_padding.upper()
else:
raise ValueError(f"""Padding strategy {global_padding} not supported""" )
_UpperCAmelCase = num_channels
_UpperCAmelCase = embedding_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = layer_type
_UpperCAmelCase = hidden_act
_UpperCAmelCase = global_padding
_UpperCAmelCase = num_groups
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = embedding_dynamic_padding
_UpperCAmelCase = output_stride
_UpperCAmelCase = width_factor
_UpperCAmelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names ) | 402 | 1 |
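# Added sketch (not part of the original module): constructing the BiT
# configuration defined above (BitConfig in the public transformers API; the
# listing strips the name) and overriding a couple of validated fields. The
# values are illustrative.
#
#   from transformers import BitConfig
#
#   config = BitConfig(layer_type="bottleneck", global_padding="same")
#   assert config.global_padding == "SAME"   # validated and upper-cased above
#   assert config.stage_names[0] == "stem"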
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase_ , unittest.TestCase ):
a__ : Tuple = KandinskyInpaintPipeline
a__ : Tuple = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a__ : Union[str, Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a__ : Any = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ : Tuple = False
@property
def _lowercase (self : int ):
return 32
@property
def _lowercase (self : Any ):
return 32
@property
def _lowercase (self : Dict ):
return self.time_input_dim
@property
def _lowercase (self : Union[str, Any] ):
return self.time_input_dim * 4
@property
def _lowercase (self : List[str] ):
return 100
@property
def _lowercase (self : List[str] ):
UpperCAmelCase_ = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _lowercase (self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
UpperCAmelCase_ = MultilingualCLIP(__A )
UpperCAmelCase_ = text_encoder.eval()
return text_encoder
@property
def _lowercase (self : str ):
torch.manual_seed(0 )
UpperCAmelCase_ = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase_ = UNetaDConditionModel(**__A )
return model
@property
def _lowercase (self : Dict ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowercase (self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = self.dummy_tokenizer
UpperCAmelCase_ = self.dummy_unet
UpperCAmelCase_ = self.dummy_movq
UpperCAmelCase_ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=__A , set_alpha_to_one=__A , steps_offset=1 , prediction_type="epsilon" , thresholding=__A , )
UpperCAmelCase_ = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _lowercase (self : Any , __a : Union[str, Any] , __a : int=0 ):
UpperCAmelCase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__A ) ).to(__A )
UpperCAmelCase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__A )
# create init_image
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__A ) ).to(__A )
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__A ) ).convert("RGB" ).resize((256, 256) )
# create mask
UpperCAmelCase_ = np.ones((64, 64) , dtype=np.floataa )
UpperCAmelCase_ = 0
if str(__A ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(__A )
else:
UpperCAmelCase_ = torch.Generator(device=__A ).manual_seed(__A )
UpperCAmelCase_ = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "cpu"
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**__A )
UpperCAmelCase_ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase_ = pipe(**self.get_dummy_inputs(__A ) )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = pipe(
**self.get_dummy_inputs(__A ) , return_dict=__A , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def _lowercase (self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase_ = np.ones((768, 768) , dtype=np.floataa )
UpperCAmelCase_ = 0
UpperCAmelCase_ = "a hat"
UpperCAmelCase_ = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__A )
UpperCAmelCase_ = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCAmelCase_ = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe_prior(
__A , generator=__A , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase_ = pipeline(
__A , image=__A , mask_image=__A , image_embeds=__A , negative_image_embeds=__A , generator=__A , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__A , __A )
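# Added sketch (not part of the original tests): the two-stage flow the slow
# test exercises, condensed. A prior pipeline turns the prompt into image
# embeddings, then the inpaint pipeline fills the masked region. Checkpoint ids
# and sizes come from the test; `init_image` and `mask` stand in for a PIL
# image and a float mask, and a GPU is needed in practice.
#
#   prior = KandinskyPriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
#   ).to("cuda")
#   image_emb, zero_image_emb = prior(
#       "a hat", num_inference_steps=5, negative_prompt=""
#   ).to_tuple()
#   pipe = KandinskyInpaintPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
#   ).to("cuda")
#   result = pipe(
#       "a hat", image=init_image, mask_image=mask,
#       image_embeds=image_emb, negative_image_embeds=zero_image_emb,
#       height=768, width=768, output_type="np",
#   ).images[0]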
| 78 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__lowerCamelCase : Dict = """bart"""
__lowerCamelCase : Union[str, Any] = True
@st.cache(allow_output_mutation=snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
if LOAD_DENSE_INDEX:
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
snake_case__ : str = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
snake_case__ : Dict = qar_model.eval()
else:
snake_case__, snake_case__ : str = (None, None)
if MODEL_TYPE == "bart":
snake_case__ : str = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
snake_case__ : int = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
snake_case__ : str = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
snake_case__ : List[Any] = sas_model.eval()
else:
snake_case__, snake_case__ : Any = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
if LOAD_DENSE_INDEX:
snake_case__ : Optional[int] = faiss.StandardGpuResources()
snake_case__ : int = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"]
snake_case__ : Tuple = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , )
snake_case__ : int = faiss.IndexFlatIP(128 )
snake_case__ : Dict = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
snake_case__, snake_case__ : int = (None, None)
snake_case__ : Tuple = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = datasets.load_dataset("eli5" , name="LFQA_reddit" )
snake_case__ : Dict = elia["train_eli5"]
snake_case__ : Optional[int] = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) )
snake_case__ : List[str] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case_ )
return (elia_train, eli5_train_q_index)
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = load_indexes()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = load_models()
__lowerCamelCase , __lowerCamelCase : List[str] = load_train_data()
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Optional[int]=10 ):
snake_case__ : Optional[Any] = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ )
snake_case__, snake_case__ : int = eli5_train_q_index.search(snake_case_ , snake_case_ )
snake_case__ : Optional[int] = [elia_train[int(snake_case_ )] for i in I[0]]
return nn_examples
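# Added sketch (not from the original demo): the dense question-to-question
# retrieval above, in isolation. It embeds the query, searches the FAISS
# inner-product index over ELI5 training questions, and maps row ids back to
# examples. The globals come from the load_* helpers above (their assigned
# names are stripped in this listing):
#
#   q_rep = embed_questions_for_retrieval(["Why is the sky blue?"], qar_tokenizer, qar_model)
#   scores, ids = eli5_train_q_index.search(q_rep, 10)
#   neighbors = [elia_train[int(i)] for i in ids[0]]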
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : List[Any]="wiki40b" , snake_case_ : Optional[int]="dense" , snake_case_ : List[str]=10 ):
if source == "none":
snake_case__, snake_case__ : Tuple = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
snake_case__, snake_case__ : Tuple = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
snake_case__, snake_case__ : Dict = query_es_index(
snake_case_ , snake_case_ , index_name="english_wiki40b_snippets_100w" , n_results=snake_case_ , )
snake_case__ : Optional[Any] = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
snake_case__ : int = "question: {} context: {}".format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any]=64 , snake_case_ : List[str]=256 , snake_case_ : Union[str, Any]=False , snake_case_ : Optional[Any]=2 , snake_case_ : str=0.95 , snake_case_ : Optional[Any]=0.8 ):
with torch.no_grad():
snake_case__ : List[str] = qa_sas_generate(
snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1024 , device="cuda:0" , )[0]
return (answer, support_list)
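# Added sketch (not part of the original demo): the retrieve-then-generate flow
# wired into the Streamlit app below, condensed. The support builder assembles
# a "question: ... context: ..." string from retrieved passages, and the
# seq2seq model decodes an answer from it. Function names are stripped in this
# listing; `make_support` / `answer_question` are assumed from the call sites
# further down:
#
#   question = "How do people make chocolate?"
#   question_doc, support = make_support(question, source="wiki40b", method="dense", n_results=10)
#   answer, _ = answer_question(
#       question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False
#   )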
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
__lowerCamelCase : Dict = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
__lowerCamelCase : Dict = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowerCamelCase : List[Any] = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowerCamelCase : Optional[int] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
__lowerCamelCase : Dict = st.sidebar.checkbox("""Demo options""")
if demo_options:
__lowerCamelCase : Tuple = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
__lowerCamelCase : Optional[Any] = action_list.index(action_st)
__lowerCamelCase : int = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
__lowerCamelCase : List[Any] = show_type == """Show full text of passages"""
else:
__lowerCamelCase : Any = 3
__lowerCamelCase : str = True
__lowerCamelCase : Optional[Any] = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
__lowerCamelCase : Any = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
__lowerCamelCase : List[str] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
__lowerCamelCase : int = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
__lowerCamelCase : Optional[int] = """wiki40b"""
__lowerCamelCase : Optional[Any] = """dense"""
__lowerCamelCase : int = """beam"""
__lowerCamelCase : Optional[Any] = 2
__lowerCamelCase : Any = 64
__lowerCamelCase : List[str] = 256
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : int = None
__lowerCamelCase : Any = st.sidebar.checkbox("""Generation options""")
if generate_options:
__lowerCamelCase : Optional[Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
__lowerCamelCase : Optional[Any] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
__lowerCamelCase : Optional[Any] = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__lowerCamelCase : List[str] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__lowerCamelCase : Optional[Any] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowerCamelCase : List[str] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowerCamelCase : str = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowerCamelCase : Any = None
# start main text
__lowerCamelCase : Any = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
__lowerCamelCase : Dict = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowerCamelCase : Optional[Any] = st.text_input("""Enter your question here:""", """""")
else:
__lowerCamelCase : List[str] = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowerCamelCase , __lowerCamelCase : Tuple = make_support(question, source=wiki_source, method="""dense""", n_results=10)
__lowerCamelCase , __lowerCamelCase : Optional[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
__lowerCamelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowerCamelCase : List[str] = support_list[:10]
__lowerCamelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowerCamelCase , __lowerCamelCase : List[str] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
__lowerCamelCase : List[str] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
__lowerCamelCase : str = res[1].strip()
if sec_titles == "":
__lowerCamelCase : Union[str, Any] = """[{}]({})""".format(res[0], wiki_url)
else:
__lowerCamelCase : List[str] = sec_titles.split(""" & """)
__lowerCamelCase : Dict = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
__lowerCamelCase : Optional[Any] = find_nearest_training(question)
__lowerCamelCase : Optional[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
__lowerCamelCase : Union[str, Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
__lowerCamelCase : List[str] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 297 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
A : str = logging.get_logger(__name__)
A : str = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = '''perceiver'''
def __init__( self : Tuple , __lowerCAmelCase : Optional[Any]=2_56 , __lowerCAmelCase : List[str]=12_80 , __lowerCAmelCase : Dict=7_68 , __lowerCAmelCase : str=1 , __lowerCAmelCase : str=26 , __lowerCAmelCase : List[Any]=8 , __lowerCAmelCase : Tuple=8 , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : str="kv" , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : Dict=1e-12 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : List[Any]=2_62 , __lowerCAmelCase : Tuple=20_48 , __lowerCAmelCase : str=56 , __lowerCAmelCase : Union[str, Any]=[3_68, 4_96] , __lowerCAmelCase : str=16 , __lowerCAmelCase : str=19_20 , __lowerCAmelCase : Tuple=16 , __lowerCAmelCase : List[Any]=[1, 16, 2_24, 2_24] , **__lowerCAmelCase : Any , ) -> List[Any]:
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
A__ = num_latents
A__ = d_latents
A__ = d_model
A__ = num_blocks
A__ = num_self_attends_per_block
A__ = num_self_attention_heads
A__ = num_cross_attention_heads
A__ = qk_channels
A__ = v_channels
A__ = cross_attention_shape_for_attention
A__ = self_attention_widening_factor
A__ = cross_attention_widening_factor
A__ = hidden_act
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = layer_norm_eps
A__ = use_query_residual
# masked language modeling attributes
A__ = vocab_size
A__ = max_position_embeddings
# image classification attributes
A__ = image_size
# flow attributes
A__ = train_size
# multimodal autoencoding attributes
A__ = num_frames
A__ = audio_samples_per_frame
A__ = samples_per_patch
A__ = output_shape
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def a_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
A__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def a_ ( self : List[Any] ) -> float:
"""simple docstring"""
return 1e-4
def a_ ( self : int , __lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A__ = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A__ = preprocessor.num_special_tokens_to_add(__lowerCAmelCase )
A__ = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
A__ = [""" """.join(["""a"""] ) * seq_length] * batch_size
A__ = dict(preprocessor(__lowerCAmelCase , return_tensors=__lowerCAmelCase ) )
A__ = inputs.pop("""input_ids""" )
return inputs
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A__ = compute_effective_axis_dimension(__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
A__ = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
A__ = dict(preprocessor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) )
A__ = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 700 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self : Optional[Any] , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : int = 50 , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : List[str] , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
A__ = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__lowerCAmelCase , )
A__ = image.to(self.device )
# set step values
self.scheduler.set_timesteps(__lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
A__ = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
A__ = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
A__ = (image / 2 + 0.5).clamp(0 , 1 )
A__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A__ = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__lowerCAmelCase )
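# Added sketch (not part of the original module): the class above mirrors the
# unconditional DDIM pipeline from diffusers, so a hypothetical run looks like
# this (the checkpoint id is illustrative):
#
#   from diffusers import DDIMPipeline
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("sample.png")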
| 247 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''AutoImageProcessor'''
_lowerCamelCase = '''AutoTokenizer'''
def __init__( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase , _lowercase )
snake_case_ : List[Any] = self.image_processor
def __call__( self , _lowercase=None , _lowercase=None , _lowercase=None , **_lowercase ) -> Tuple:
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case_ : List[str] = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
if images is not None:
snake_case_ : Optional[int] = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase )
if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 58 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
a =logging.get_logger(__name__)
# General docstring
a ="""RegNetConfig"""
# Base docstring
a ="""facebook/regnet-y-040"""
a =[1, 1088, 7, 7]
# Image classification docstring
a ="""facebook/regnet-y-040"""
a ="""tabby, tabby cat"""
a =[
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 3 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : Optional[str] = "relu" ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__lowerCamelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
__lowerCamelCase : Union[str, Any] = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE__ ,kernel_size=SCREAMING_SNAKE_CASE__ ,strides=SCREAMING_SNAKE_CASE__ ,padding='VALID' ,groups=SCREAMING_SNAKE_CASE__ ,use_bias=SCREAMING_SNAKE_CASE__ ,name='convolution' ,)
__lowerCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization')
__lowerCamelCase : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[str]):
__lowerCamelCase : List[Any] = self.convolution(self.padding(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Union[str, Any] = self.normalization(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.activation(SCREAMING_SNAKE_CASE__)
return hidden_state
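# --- Added illustration (a sketch, not part of the original module) ---
# The conv layer above pairs an explicit ZeroPadding2D with a VALID convolution
# to get symmetric kernel_size // 2 padding, matching PyTorch's Conv2d padding
# rather than TF's asymmetric "same" placement for strided convolutions. A
# self-contained shape check (names here are local to the sketch):
def _demo_pad_then_valid_conv():
    import tensorflow as tf

    x = tf.random.normal((1, 32, 32, 8))  # NHWC
    pad = tf.keras.layers.ZeroPadding2D(padding=3 // 2)
    conv = tf.keras.layers.Conv2D(16, kernel_size=3, strides=2, padding="VALID")
    y = conv(pad(x))
    assert y.shape == (1, 16, 16, 16)  # floor((34 - 3) / 2) + 1 == 16
    return y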
class A_ ( tf.keras.layers.Layer ):
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,**SCREAMING_SNAKE_CASE__ : Dict):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = config.num_channels
__lowerCamelCase : Dict = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='embedder' ,)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : Optional[int] = shape_list(SCREAMING_SNAKE_CASE__)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__lowerCamelCase : Optional[int] = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 2, 3, 1))
__lowerCamelCase : List[Any] = self.embedder(SCREAMING_SNAKE_CASE__)
return hidden_state
class A_ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 2 ,**SCREAMING_SNAKE_CASE__ : Tuple):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,strides=SCREAMING_SNAKE_CASE__ ,use_bias=SCREAMING_SNAKE_CASE__ ,name='convolution')
__lowerCamelCase : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization')
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : bool = False):
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE__) ,training=SCREAMING_SNAKE_CASE__)
class A_ ( tf.keras.layers.Layer ):
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Any):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ ,name='pooler')
__lowerCamelCase : Dict = [
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation='relu' ,name='attention.0'),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation='sigmoid' ,name='attention.2'),
]
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__lowerCamelCase : Optional[Any] = self.pooler(SCREAMING_SNAKE_CASE__)
for layer_module in self.attention:
__lowerCamelCase : Any = layer_module(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = hidden_state * pooled
return hidden_state
class A_ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 1 ,**SCREAMING_SNAKE_CASE__ : List[Any]):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = in_channels != out_channels or stride != 1
__lowerCamelCase : Union[str, Any] = max(1 ,out_channels // config.groups_width)
__lowerCamelCase : Dict = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,name='shortcut')
if should_apply_shortcut
else tf.keras.layers.Activation('linear' ,name='shortcut')
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__lowerCamelCase : Optional[int] = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0'),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,activation=config.hidden_act ,name='layer.1'),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=SCREAMING_SNAKE_CASE__ ,name='layer.2'),
]
__lowerCamelCase : Dict = ACTaFN[config.hidden_act]
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : int = hidden_state
for layer_module in self.layers:
__lowerCamelCase : List[str] = layer_module(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = self.shortcut(SCREAMING_SNAKE_CASE__)
hidden_state += residual
__lowerCamelCase : int = self.activation(SCREAMING_SNAKE_CASE__)
return hidden_state
class A_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 1 ,**SCREAMING_SNAKE_CASE__ : List[str]):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = in_channels != out_channels or stride != 1
__lowerCamelCase : Tuple = max(1 ,out_channels // config.groups_width)
__lowerCamelCase : int = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,name='shortcut')
if should_apply_shortcut
else tf.keras.layers.Activation('linear' ,name='shortcut')
)
__lowerCamelCase : Optional[int] = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0'),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,activation=config.hidden_act ,name='layer.1'),
TFRegNetSELayer(SCREAMING_SNAKE_CASE__ ,reduced_channels=int(round(in_channels / 4)) ,name='layer.2'),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=SCREAMING_SNAKE_CASE__ ,name='layer.3'),
]
__lowerCamelCase : List[Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : Optional[int] = hidden_state
for layer_module in self.layers:
__lowerCamelCase : Dict = layer_module(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.shortcut(SCREAMING_SNAKE_CASE__)
hidden_state += residual
__lowerCamelCase : Any = self.activation(SCREAMING_SNAKE_CASE__)
return hidden_state
class A_ ( tf.keras.layers.Layer ):
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 2 ,SCREAMING_SNAKE_CASE__ : int = 2 ,**SCREAMING_SNAKE_CASE__ : Tuple):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
__lowerCamelCase : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,name='layers.0'),
*[layer(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,name=F"layers.{i+1}") for i in range(depth - 1)],
]
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any]):
for layer_module in self.layers:
__lowerCamelCase : Any = layer_module(SCREAMING_SNAKE_CASE__)
return hidden_state
class A_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,**SCREAMING_SNAKE_CASE__ : Any):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='stages.0' ,))
__lowerCamelCase : Optional[int] = zip(config.hidden_sizes ,config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE__ ,config.depths[1:])):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,depth=SCREAMING_SNAKE_CASE__ ,name=F"stages.{i+1}"))
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True):
__lowerCamelCase : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowerCamelCase : Optional[Any] = hidden_states + (hidden_state,)
__lowerCamelCase : str = stage_module(SCREAMING_SNAKE_CASE__)
if output_hidden_states:
__lowerCamelCase : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ ,hidden_states=SCREAMING_SNAKE_CASE__)
@keras_serializable
class A_ ( tf.keras.layers.Layer ):
_UpperCAmelCase : List[Any] = RegNetConfig
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = config
__lowerCamelCase : Optional[int] = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE__ ,name='embedder')
__lowerCamelCase : Union[str, Any] = TFRegNetEncoder(SCREAMING_SNAKE_CASE__ ,name='encoder')
__lowerCamelCase : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ ,name='pooler')
@unpack_inputs
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,):
__lowerCamelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase : Union[str, Any] = self.embedder(SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = self.encoder(
SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = encoder_outputs[0]
__lowerCamelCase : int = self.pooler(SCREAMING_SNAKE_CASE__)
# Change to NCHW output format have uniformity in the modules
__lowerCamelCase : Union[str, Any] = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2))
__lowerCamelCase : str = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__lowerCamelCase : Union[str, Any] = tuple([tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Any = RegNetConfig
_UpperCAmelCase : Optional[int] = '''regnet'''
_UpperCAmelCase : List[Any] = '''pixel_values'''
@property
def lowerCAmelCase ( self : int):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) ,dtype=tf.floataa)}
a =r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matters related to general usage and
    behavior.
    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
a =r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : Tuple):
super().__init__(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ ,name='regnet')
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : int=False ,):
__lowerCamelCase : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase : Tuple = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__ ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,*SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : str):
super().__init__(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = config.num_labels
__lowerCamelCase : Union[str, Any] = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ ,name='regnet')
# classification head
__lowerCamelCase : Optional[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name='classifier.1') if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : tf.Tensor = None ,SCREAMING_SNAKE_CASE__ : tf.Tensor = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Any=False ,):
__lowerCamelCase : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase : str = self.regnet(
SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1]
__lowerCamelCase : Optional[Any] = self.classifier[0](SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.classifier[1](SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__)
if not return_dict:
__lowerCamelCase : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states)
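# Shape walk-through of the classification head above (an illustrative sketch,
# not from the source; spatial sizes depend on the config and input resolution):
#   pixel_values (B, 3, H, W) -> regnet encoder -> feature map (B, C, H', W')
#   -> GlobalAveragePooling2D(keepdims=True) -> pooled output (B, C, 1, 1)
#   -> Flatten -> (B, C) -> Dense(num_labels) -> logits (B, num_labels)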
| 652 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _snake_case ( a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = KandinskyVaaInpaintPipeline
SCREAMING_SNAKE_CASE : Optional[int] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
SCREAMING_SNAKE_CASE : List[Any] = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
SCREAMING_SNAKE_CASE : Any = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
SCREAMING_SNAKE_CASE : List[Any] = False
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 32
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 32
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 1_00
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type='epsilon' , thresholding=_SCREAMING_SNAKE_CASE , )
        components = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_SCREAMING_SNAKE_CASE )
# create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uinta(_SCREAMING_SNAKE_CASE ) ).convert('RGB' ).resize((2_56, 2_56) )
# create mask
        mask = np.ones((64, 64) , dtype=np.floataa )
        mask[:32, :32] = 0
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
            generator = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
            generator = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
        inputs = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = 'cpu'
lowerCAmelCase = self.get_dummy_components()
        pipe = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
        pipe = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        output = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
        image = output.images
        image_from_tuple = pipe(
**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCAmelCase = np.ones((7_68, 7_68) , dtype=np.floataa )
        mask[:250, 250:-250] = 0
lowerCAmelCase = 'a hat'
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_SCREAMING_SNAKE_CASE )
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
        pipeline = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase , lowerCAmelCase = pipe_prior(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        output = pipeline(
image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , image_embeds=_SCREAMING_SNAKE_CASE , negative_image_embeds=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
        image = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 514 |
'''simple docstring'''
def count_inversions_bf ( arr ):
    """simple docstring"""
    num_inversions = 0
    n = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive ( arr ):
    """simple docstring"""
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a , inversion_p = count_inversions_recursive(p )
    b , inversions_q = count_inversions_recursive(q )
    c , cross_inversions = _count_cross_inversions(a , b )
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions ( p , q ):
    """simple docstring"""
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p)
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
def main ( ):
    """simple docstring"""
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1 )
    _ , num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ' , num_inversions_bf )
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1 )
    _ , num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1 )
    _ , num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
if __name__ == "__main__":
main()
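# Complexity note with a tiny worked example (illustrative, not from the source):
# count_inversions_bf is O(n^2), while the recursive merge-based version runs in
# O(n log n). For arr = [3, 1, 2] both report 2 inversions: (3, 1) and (3, 2).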
| 514 | 1 |
def lowerCamelCase__ ( string , separator = " " ):
    '''simple docstring'''
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
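# Expected behavior (illustrative examples; `split` stands in for the masked
# function name above and is an assumption, not a name from the source):
#   split("apple#banana#cherry#orange", separator="#")
#     -> ['apple', 'banana', 'cherry', 'orange']
#   split("Hello there") -> ['Hello', 'there']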
if __name__ == "__main__":
from doctest import testmod
testmod() | 30 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowerCamelCase ( ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Any = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__UpperCAmelCase : Any = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(_UpperCamelCase )
# Let's go
__UpperCAmelCase : int = parser.parse_args()
if not hasattr(_UpperCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
__UpperCAmelCase : List[str] = args.func(_UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
| 139 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance ( vector_1: Vector , vector_2: Vector ) -> VectorOut:
    '''simple docstring'''
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )
def euclidean_distance_no_np ( vector_1: Vector , vector_2: Vector ) -> VectorOut:
    '''simple docstring'''
    return sum((va - vb) ** 2 for va, vb in zip(vector_1 , vector_2 ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark ( ):
        '''simple docstring'''
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) )
benchmark()
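# For the benchmark inputs above both implementations should agree:
# distance([1, 2, 3], [4, 5, 6]) = sqrt(3 * 3**2) = sqrt(27) ~= 5.196.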
| 139 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Optional[Any]=13 , __lowercase : List[Any]=7 , __lowercase : List[str]=True , __lowercase : Optional[Any]=True , __lowercase : Any=True , __lowercase : Optional[int]=True , __lowercase : int=99 , __lowercase : str=24 , __lowercase : Tuple=2 , __lowercase : Union[str, Any]=6 , __lowercase : List[str]=37 , __lowercase : int="gelu" , __lowercase : List[Any]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Any=5_12 , __lowercase : Optional[int]=16 , __lowercase : int=2 , __lowercase : Tuple=0.02 , __lowercase : int=3 , __lowercase : Union[str, Any]=None , __lowercase : List[str]=10_00 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = range_bbox
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = None
if self.use_input_mask:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self : str ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def snake_case__ ( self : List[str] , __lowercase : Any , __lowercase : Tuple , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : int , ):
"""simple docstring"""
snake_case_ = LiltModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
snake_case_ = model(__lowercase , bbox=__lowercase , token_type_ids=__lowercase )
snake_case_ = model(__lowercase , bbox=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case__ ( self : Optional[int] , __lowercase : Dict , __lowercase : int , __lowercase : List[Any] , __lowercase : str , __lowercase : List[str] , __lowercase : Dict , __lowercase : Optional[Any] , ):
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LiltForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(
__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Any , __lowercase : int , __lowercase : Optional[Any] , ):
"""simple docstring"""
snake_case_ = LiltForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(
__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
snake_case_ = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def snake_case__ ( self : List[Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : int ):
"""simple docstring"""
return True
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = LiltModelTester(self )
snake_case_ = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def snake_case__ ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase )
@slow
def snake_case__ ( self : Any ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LiltModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@require_torch
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(__lowercase )
snake_case_ = torch.tensor([[1, 2]] , device=__lowercase )
snake_case_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__lowercase )
# forward pass
with torch.no_grad():
snake_case_ = model(input_ids=__lowercase , bbox=__lowercase )
snake_case_ = torch.Size([1, 2, 7_68] )
snake_case_ = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=__lowercase , )
self.assertTrue(outputs.last_hidden_state.shape , __lowercase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __lowercase , atol=1E-3 ) )
| 139 | 1 |
from __future__ import annotations
def lowercase ( matrix ):
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
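# Worked example for the dynamic-programming minimum path sum above
# (illustrative, not a doctest from the source):
#
#     grid = [[1, 3, 1],
#             [1, 5, 1],
#             [4, 2, 1]]
#     lowercase(grid)  # -> 7, via the path 1 -> 3 -> 1 -> 1 -> 1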
| 392 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('''sample_data.csv''', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 1_0
    forward_days = 5
    periods = 2_0
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='''mean_squared_error''', optimizer='''adam''')
    history = model.fit(
        x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
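    # Note (an observation, not from the source): MinMaxScaler().fit_transform
    # above discards the fitted scaler, so the predictions stay in the scaled
    # [0, 1] space. A sketch of recovering the original scale, assuming the
    # scaler instance were kept:
    #
    #     scaler = MinMaxScaler().fit(raw_values)
    #     actual_data = scaler.transform(raw_values)
    #     ...train / predict as above...
    #     forecast = scaler.inverse_transform(pred[:, 0].reshape(-1, 1))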
| 392 | 1 |
_lowerCAmelCase : Union[str, Any] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode ( data: bytes ) -> bytes:
    """simple docstring"""
    if not isinstance(data , bytes ):
        msg = f'a bytes-like object is required, not \'{data.__class__.__name__}\''
        raise TypeError(msg )
    binary_stream = ''.join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = B'=' * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = B''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode ( encoded_data: bytes ) -> bytes:
    """simple docstring"""
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f'not \'{encoded_data.__class__.__name__}\''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode('utf-8' )
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters' )
    padding = encoded_data.count('=' )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
if __name__ == "__main__":
import doctest
doctest.testmod()
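# Round-trip sanity check for the pair above (illustrative; it mirrors the
# standard library, and the literal below is ordinary base64 output):
#
#     import base64
#     data = b"Hello, base64!"
#     assert base64_encode(data) == base64.b64encode(data) == b"SGVsbG8sIGJhc2U2NCE="
#     assert base64_decode(base64_encode(data)) == data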
| 604 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class __snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ], # cumulative prob of 5 highest values <= 0.6
] ,dtype=tf.floataa ,)
lowerCAmelCase__ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] ,dtype=tf.intaa ,) # expected non filtered idx as noted above
lowerCAmelCase__ = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] ,dtype=tf.floataa ,) # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(a_ ,top_k=10 ,top_p=0.6 ,min_tokens_to_keep=4 )
lowerCAmelCase__ = output[output != -float('inf' )]
lowerCAmelCase__ = tf.cast(
tf.where(tf.not_equal(a_ ,tf.constant(-float('inf' ) ,dtype=tf.floataa ) ) ) ,dtype=tf.intaa ,)
tf.debugging.assert_near(a_ ,a_ ,rtol=1e-1_2 )
tf.debugging.assert_equal(a_ ,a_ )
@require_tf
class __snake_case ( unittest.TestCase , SCREAMING_SNAKE_CASE ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
SCREAMING_SNAKE_CASE__ = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# TF-only test: tf.saved_model export
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
class __snake_case ( tf.Module ):
def __init__( self ,a_ ):
"""simple docstring"""
super(a_ ,self ).__init__()
lowerCAmelCase__ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) ,tf.intaa ,name='input_ids' ),
tf.TensorSpec((None, input_length) ,tf.intaa ,name='attention_mask' ),
) ,jit_compile=a_ ,)
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.model.generate(
input_ids=a_ ,attention_mask=a_ ,max_new_tokens=a_ ,return_dict_in_generate=a_ ,)
return {"sequences": outputs["sequences"]}
lowerCAmelCase__ = [[2, 0], [102, 103]]
lowerCAmelCase__ = [[1, 0], [1, 1]]
lowerCAmelCase__ = DummyModel(model=a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a_ ,a_ ,signatures={'serving_default': dummy_model.serving} )
lowerCAmelCase__ = tf.saved_model.load(a_ ).signatures['serving_default']
for batch_size in range(1 ,len(a_ ) + 1 ):
lowerCAmelCase__ = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCAmelCase__ = serving_func(**a_ )['sequences']
lowerCAmelCase__ = test_model.generate(**a_ ,max_new_tokens=a_ )
tf.debugging.assert_equal(a_ ,a_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# TF-only test: tf.saved_model export
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
class __snake_case ( tf.Module ):
def __init__( self ,a_ ):
"""simple docstring"""
super(a_ ,self ).__init__()
lowerCAmelCase__ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) ,tf.intaa ,name='input_ids' ),
tf.TensorSpec((batch_size, None) ,tf.intaa ,name='attention_mask' ),
) ,jit_compile=a_ ,)
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.model.generate(
input_ids=a_ ,attention_mask=a_ ,max_new_tokens=a_ ,return_dict_in_generate=a_ ,)
return {"sequences": outputs["sequences"]}
lowerCAmelCase__ = [[2], [102, 103]]
lowerCAmelCase__ = [[1], [1, 1]]
lowerCAmelCase__ = DummyModel(model=a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a_ ,a_ ,signatures={'serving_default': dummy_model.serving} )
lowerCAmelCase__ = tf.saved_model.load(a_ ).signatures['serving_default']
for input_row in range(len(a_ ) ):
lowerCAmelCase__ = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCAmelCase__ = serving_func(**a_ )['sequences']
lowerCAmelCase__ = test_model.generate(**a_ ,max_new_tokens=a_ )
tf.debugging.assert_equal(a_ ,a_ )
@slow
@require_tensorflow_text
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' ,filename='spiece.model' ,local_dir=a_ )
class __snake_case ( tf.keras.layers.Layer ):
def __init__( self ):
"""simple docstring"""
super().__init__()
lowerCAmelCase__ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(a_ ,'spiece.model' ) ,'rb' ).read() )
lowerCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,*a_ ,**a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer.tokenize(a_ )
lowerCAmelCase__ , lowerCAmelCase__ = text.pad_model_inputs(
a_ ,max_seq_length=64 ,pad_value=self.model.config.pad_token_id )
lowerCAmelCase__ = self.model.generate(input_ids=a_ ,attention_mask=a_ )
return self.tokenizer.detokenize(a_ )
lowerCAmelCase__ = CompleteSentenceTransformer()
lowerCAmelCase__ = tf.keras.layers.Input(shape=(1,) ,dtype=tf.string ,name='inputs' )
lowerCAmelCase__ = complete_model(a_ )
lowerCAmelCase__ = tf.keras.Model(a_ ,a_ )
keras_model.save(a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# Has PT equivalent: this test relies on random sampling
lowerCAmelCase__ = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
        expectation = 14
lowerCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 'Hello, my dog is cute and'
lowerCAmelCase__ = tokenizer(a_ ,return_tensors='tf' )
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
            generated_tokens = model.generate(**a_ ,eos_token_id=a_ ,**a_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCAmelCase__ = [638, 198]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
            generated_tokens = model.generate(**a_ ,eos_token_id=a_ ,**a_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# Has PT equivalent: ample use of framework-specific code
lowerCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = 'Hugging Face is a technology company based in New York and Paris.'
lowerCAmelCase__ = bart_tokenizer(a_ ,return_tensors='tf' ).input_ids
lowerCAmelCase__ = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = bart_model.generate(a_ ).numpy()
class __snake_case ( SCREAMING_SNAKE_CASE ):
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=None ,**a_ ):
"""simple docstring"""
return super().call(a_ ,**a_ )
lowerCAmelCase__ = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = bart_model.generate(a_ ,foo='bar' ).numpy()
self.assertTrue(np.array_equal(a_ ,a_ ) )
class __snake_case ( bart_model.model.encoder.__class__ ):
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,**a_ ):
"""simple docstring"""
return super().call(a_ ,**a_ )
        fake_encoder = FakeEncoder(bart_model.config ,bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCAmelCase__ = bart_model.generate(a_ ).numpy()
with self.assertRaises(a_ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(a_ ,foo='bar' )
| 604 | 1 |
"""simple docstring"""
from __future__ import annotations
class a :
def __init__( self : Optional[int] , __lowerCAmelCase : list[list[int]] ):
        error = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(__lowerCAmelCase ) != 0:
            cols = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__lowerCAmelCase ) != cols:
raise error
for value in row:
if not isinstance(__lowerCAmelCase , (int, float) ):
raise error
            self.rows = rows
else:
            self.rows = []
def lowerCAmelCase_ ( self : Union[str, Any] ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return len(self.rows )
@property
def lowerCAmelCase_ ( self : str ):
return len(self.rows[0] )
@property
def lowerCAmelCase_ ( self : Any ):
return (self.num_rows, self.num_columns)
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.order[0] == self.order[1]
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowerCAmelCase_ ( self : str ):
return bool(self.determinant() )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__lowerCAmelCase ).determinant()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
if (row + column) % 2 == 0:
return self.get_minor(__lowerCAmelCase , __lowerCAmelCase )
return -1 * self.get_minor(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
return Matrix(
[
[self.get_minor(__lowerCAmelCase , __lowerCAmelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowerCAmelCase_ ( self : List[Any] ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
        determinant = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self : Any ):
return str(self.rows )
def __str__( self : Dict ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(__lowerCAmelCase ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : list[int] , __lowerCAmelCase : int | None = None ):
_UpperCAmelCase = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise type_error
for value in row:
if not isinstance(__lowerCAmelCase , (int, float) ):
raise type_error
if len(__lowerCAmelCase ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(__lowerCAmelCase )
else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int | None = None ):
_UpperCAmelCase = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise type_error
for value in column:
if not isinstance(__lowerCAmelCase , (int, float) ):
raise type_error
if len(__lowerCAmelCase ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
            self.rows = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Any , __lowerCAmelCase : object ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Tuple , __lowerCAmelCase : object ):
return not self == other
def __neg__( self : List[str] ):
return self * -1
def __add__( self : str , __lowerCAmelCase : Matrix ):
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Optional[int] , __lowerCAmelCase : Matrix ):
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : List[str] , __lowerCAmelCase : Matrix | int | float ):
if isinstance(__lowerCAmelCase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(__lowerCAmelCase , __lowerCAmelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self : Optional[int] , __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] ):
return sum(row[i] * column[i] for i in range(len(__lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
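# Illustrative usage of the matrix class above (the class is named `a` in this
# dump; the values follow from the definitions and are not source output):
#
#     m = a([[1, 2], [3, 4]])
#     m.determinant()   # -> -2
#     (m + m).rows      # -> [[2, 4], [6, 8]]
#     (m * m).rows      # -> [[7, 10], [15, 22]]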
| 277 | """simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class a ( lowerCAmelCase_ ):
_snake_case : Union[PIL.Image.Image, np.ndarray]
class a ( lowerCAmelCase_ ):
def __init__( self : Dict , __lowerCAmelCase : PriorTransformer , __lowerCAmelCase : CLIPVisionModel , __lowerCAmelCase : CLIPImageProcessor , __lowerCAmelCase : HeunDiscreteScheduler , __lowerCAmelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=__lowerCAmelCase , image_encoder=__lowerCAmelCase , image_processor=__lowerCAmelCase , scheduler=__lowerCAmelCase , renderer=__lowerCAmelCase , )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
if latents is None:
_UpperCAmelCase = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_UpperCAmelCase = latents.to(__lowerCAmelCase )
_UpperCAmelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_UpperCAmelCase = torch.device(f'''cuda:{gpu_id}''' )
_UpperCAmelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase , __lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : List[str] ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Any , ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
_UpperCAmelCase = torch.cat(__lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(__lowerCAmelCase , axis=0 )
if not isinstance(__lowerCAmelCase , torch.Tensor ):
_UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
_UpperCAmelCase = image.to(dtype=self.image_encoder.dtype , device=__lowerCAmelCase )
_UpperCAmelCase = self.image_encoder(__lowerCAmelCase )["""last_hidden_state"""]
_UpperCAmelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_UpperCAmelCase = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 25 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : int = 64 , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ):
if isinstance(__lowerCAmelCase , PIL.Image.Image ):
            batch_size = 1
elif isinstance(__lowerCAmelCase , torch.Tensor ):
            batch_size = image.shape[0]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(__lowerCAmelCase )
else:
raise ValueError(
f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__lowerCAmelCase )}''' )
_UpperCAmelCase = self._execution_device
        batch_size = batch_size * num_images_per_prompt
_UpperCAmelCase = guidance_scale > 1.0
        image_embeds = self._encode_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# prior
self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase )
_UpperCAmelCase = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
# remove the variance
            noise_pred , _ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
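            # Classifier-free guidance relies on the doubled batch built earlier
            # (a zeroed image embedding concatenated with the real one), so
            # chunk(2) below yields the unconditional and conditional predictions.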
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__lowerCAmelCase )
        images = []
for i, latent in enumerate(__lowerCAmelCase ):
_UpperCAmelCase = self.renderer.decode(
latent[None, :] , __lowerCAmelCase , size=__lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(__lowerCAmelCase )
        images = torch.stack(images )
if output_type not in ["np", "pil"]:
raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
        images = images.cpu().numpy()
if output_type == "pil":
_UpperCAmelCase = [self.numpy_to_pil(__lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__lowerCAmelCase )
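
# The guidance step above is the standard classifier-free guidance update:
# pred = uncond + scale * (cond - uncond). A minimal, self-contained sketch of that
# combination on dummy tensors (the names here are illustrative, not the pipeline's API):
import torch

def apply_cfg(stacked_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # `stacked_pred` holds the unconditional and conditional predictions stacked on dim 0.
    uncond, cond = stacked_pred.chunk(2)
    return uncond + guidance_scale * (cond - uncond)

# e.g. apply_cfg(torch.randn(2, 16, 32), guidance_scale=4.0) has shape (1, 16, 32)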
| 277 | 1 |
_SCREAMING_SNAKE_CASE : Any = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
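
# The table above maps a package name to its full pip requirement string. A small
# sketch (an assumption, not part of the original module) for splitting a pin back
# into (name, specifier), e.g. when regenerating setup extras:
import re

def split_pin(requirement: str) -> tuple:
    match = re.match(r"^([A-Za-z0-9_.\[\]-]+?)\s*([<>=!~].*)?$", requirement)
    return match.group(1), match.group(2) or ""

# split_pin("tokenizers>=0.11.1,!=0.11.3,<0.14") == ("tokenizers", ">=0.11.1,!=0.11.3,<0.14")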
| 472 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ) -> List[str]:
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler()
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ) -> Any:
        generator = torch.manual_seed(seed )
        inputs = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ) -> Optional[Any]:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ) -> List[Any]:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''french fries'''
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCamelCase , negative_prompt=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCamelCase , view_batch_size=2 )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 ) -> Dict:
        generator = torch.manual_seed(seed )
        inputs = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = '''stabilityai/stable-diffusion-2-base'''
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_pretrained(__lowerCamelCase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def lowercase_ ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase_ ( self : List[str] ) -> List[str]:
        number_of_steps = 0

        def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
SCREAMING_SNAKE_CASE__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
SCREAMING_SNAKE_CASE__ = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
SCREAMING_SNAKE_CASE__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
SCREAMING_SNAKE_CASE__ = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
SCREAMING_SNAKE_CASE__ = '''stabilityai/stable-diffusion-2-base'''
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_pretrained(__lowerCamelCase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
pipe(**__lowerCamelCase , callback=__lowerCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase_ ( self : Tuple ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ = '''stabilityai/stable-diffusion-2-base'''
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_pretrained(__lowerCamelCase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
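
# The offload test above brackets a pipeline call with CUDA memory-stat resets. A small
# reusable sketch of that pattern (assumes a CUDA device; the helper name is ours):
import torch

def run_with_peak_memory(fn, *args, **kwargs):
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    result = fn(*args, **kwargs)
    return result, torch.cuda.max_memory_allocated()

# usage: _, peak_bytes = run_with_peak_memory(pipe, **inputs); assert peak_bytes < 5.5 * 10**9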
| 472 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp( self ) -> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '[UNK]'}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(merges ))
    def get_tokenizer( self , **kwargs ) -> int:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Optional[int]:
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ) -> str:
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids( self ) -> List[str]:
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello' , 'World' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'] , expected_token_type_ids )
@slow
    def test_sequence_builders( self ) -> int:
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/deberta-base')

        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )

        encoded_text_from_decode = tokenizer.encode(
            'sequence builders' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=True , add_prefix_space=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration( self ) -> str:
        tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class)
for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('microsoft/deberta-base')
            sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            encoding = tokenizer(sequences , padding=True)
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True) for seq in encoding['input_ids']]
# fmt: off
            expected_encoding = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data , expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence , decoded_sequences):
                self.assertEqual(expected , decoded)
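
# The fixture in setUp above stores merges in the GPT-2/RoBERTa text format ("#version"
# header, one space-separated symbol pair per line). A minimal sketch of turning such
# lines back into the rank table a byte-level BPE tokenizer consumes:
merges_lines = ['\u0120 l', '\u0120l o', '\u0120lo w', 'e r']
bpe_ranks = {tuple(line.split()): rank for rank, line in enumerate(merges_lines)}
assert bpe_ranks[('\u0120lo', 'w')] == 2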
| 20 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["""data"""])
y = np.array(data["""target"""])
classes = data["""target_names"""]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a , b ):
    """simple docstring"""
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier(train_data , train_target , classes , point , k=5 ):
    """simple docstring"""
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
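    # A quick accuracy check on the held-out split (a sketch; not part of the original script):
    predictions = [classifier(X_train, y_train, classes, point) for point in X_test]
    print("""test accuracy:""", np.mean([classes[target] == pred for target, pred in zip(y_test, predictions)]))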
| 62 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 701 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__( self ):
        '''simple docstring'''
        self.connections = {}

    def add_node( self , node: str ):
        '''simple docstring'''
        self.connections[node] = {}

    def add_transition_probability( self , node1: str , node2: str , probability: float ):
        '''simple docstring'''
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability

    def get_nodes( self ):
        '''simple docstring'''
        return list(self.connections )

    def transition( self , node: str ):
        '''simple docstring'''
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]

            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str , transitions: list[tuple[str, str, float]] , steps: int ):
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )

    visited = Counter(graph.get_nodes() )
    node = start

    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
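
    # Example run (a sketch): a two-state chain sampled 1000 times from state "a".
    transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", transitions, 1000))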
| 292 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
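
# Usage sketch (assumes transformers is installed with torch): attribute access on the
# lazy module is what triggers the real import of the submodule defining the name.
if __name__ == "__main__":
    from transformers.models.xlm_roberta_xl import XLMRobertaXLConfig

    print(XLMRobertaXLConfig().hidden_size)  # the defining submodule is only loaded on demand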
| 560 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __A :
'''simple docstring'''
@staticmethod
def UpperCAmelCase ( *_snake_case : Any ,**_snake_case : List[str] ) -> List[str]:
"""simple docstring"""
pass
def hashimage(image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ) -> str:
        """simple docstring"""
        depth_estimator = DepthEstimationPipeline(model=model ,image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self , depth_estimator , examples ) -> Any:
        """simple docstring"""
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,outputs )
import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] ,outputs ,)
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@slow
@require_torch
    def test_large_model_pt( self ) -> Dict:
        """simple docstring"""
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' ,model=model_id )
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        outputs['''depth'''] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.662 )
@require_torch
    def test_small_model_pt( self ) -> Union[str, Any]:
"""simple docstring"""
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
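
# Minimal end-to-end usage of the pipeline exercised above (a sketch; downloads the
# Intel/dpt-large checkpoint on first run):
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    # result["depth"] is a PIL.Image, result["predicted_depth"] a torch.Tensor
    result["depth"].save("depth.png")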
| 560 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.0_02 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config( self ) -> Optional[Any]:
return TaConfig.from_pretrained("google/umt5-base" )
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> List[Any]:
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs( self ) -> Optional[Any]:
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
        return config, input_dict
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config( self ) -> Optional[Any]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config( self ) -> Optional[int]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ) -> str:
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ) -> Tuple:
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )

        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )

        output_from_no_past = model(next_input_ids )["last_hidden_state"]
        output_from_past = model(next_tokens , past_key_values=past_key_values )["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_model_fpaa_forward( self , config , input_dict , ) -> int:
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": UMTaForConditionalGeneration,
            """feature-extraction""": UMTaModel,
            """summarization""": UMTaForConditionalGeneration,
            """text2text-generation""": UMTaForConditionalGeneration,
            """translation""": UMTaForConditionalGeneration,
            """question-answering""": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp( self ) -> Optional[int]:
        self.model_tester = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
    def test_export_to_onnx( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=True , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
    def test_model_fpaa_forward( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs )
    def test_generate_with_head_masking( self ) -> Any:
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )

        head_masking = {
            "head_mask": torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }

        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )

            out = model.generate(
                config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
    def test_small_integration_test( self ) -> Any:
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=False , legacy=False )
        input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
        input_ids = tokenizer(input_text , return_tensors="pt" , padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )

        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING ) | 39 |
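
# The decoder-cache test above checks one invariant: decoding a single new token with
# `past_key_values` must match slicing a full-sequence forward pass. As a compact sketch
# with a hypothetical decoder `model` (the names are illustrative, not this file's API):
#
#   full = model(torch.cat([input_ids, next_tokens], dim=-1))["last_hidden_state"]
#   step = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
#   assert torch.allclose(full[:, -1], step[:, 0], atol=1e-3)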
'''simple docstring'''
import os
import sys
import unittest
lowerCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
BLIP_TEST_FILE = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class UpperCAmelCase__ ( unittest.TestCase ):
    def test_get_test_to_tester_mapping( self ) -> str:
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE )
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , EXPECTED_BLIP_MAPPING )
    def test_get_model_to_test_mapping( self ) -> List[Any]:
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE )
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , EXPECTED_BLIP_MAPPING )
    def test_get_model_to_tester_mapping( self ) -> str:
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE )
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , EXPECTED_BLIP_MAPPING ) | 39 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Optional[Any] = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = 'convbert'
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=768 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ) -> Union[str, Any]:
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
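
# Instantiation sketch for a smaller variant (uses the config class defined above):
if __name__ == "__main__":
    small = ConvBertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    print(small.head_ratio, small.conv_kernel_size)  # defaults: 2, 9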
| 251 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''ctrl''': 256,
}
CONTROL_CODES = {
'''Pregnancy''': 168_629,
'''Christianity''': 7_675,
'''Explain''': 106_423,
'''Fitness''': 63_440,
'''Saving''': 63_163,
'''Ask''': 27_171,
'''Ass''': 95_985,
'''Joke''': 163_509,
'''Questions''': 45_622,
'''Thoughts''': 49_605,
'''Retail''': 52_342,
'''Feminism''': 164_338,
'''Writing''': 11_992,
'''Atheism''': 192_263,
'''Netflix''': 48_616,
'''Computing''': 39_639,
'''Opinion''': 43_213,
'''Alone''': 44_967,
'''Funny''': 58_917,
'''Gaming''': 40_358,
'''Human''': 4_088,
'''India''': 1_331,
'''Joker''': 77_138,
'''Diet''': 36_206,
'''Legal''': 11_859,
'''Norman''': 4_939,
'''Tip''': 72_689,
'''Weight''': 52_343,
'''Movies''': 46_273,
'''Running''': 23_425,
'''Science''': 2_090,
'''Horror''': 37_793,
'''Confession''': 60_572,
'''Finance''': 12_250,
'''Politics''': 16_360,
'''Scary''': 191_985,
'''Support''': 12_654,
'''Technologies''': 32_516,
'''Teenage''': 66_160,
'''Event''': 32_769,
'''Learned''': 67_460,
'''Notion''': 182_770,
'''Wikipedia''': 37_583,
'''Books''': 6_665,
'''Extract''': 76_050,
'''Confessions''': 102_701,
'''Conspiracy''': 75_932,
'''Links''': 63_674,
'''Narcissus''': 150_425,
'''Relationship''': 54_766,
'''Relationships''': 134_796,
'''Reviews''': 41_671,
'''News''': 4_256,
'''Translation''': 26_820,
'''multilingual''': 128_406,
}
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char

    pairs = set(pairs )
    return pairs
class CTRLTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__( self , vocab_file , merges_file , unk_token="<unk>" , **kwargs ):
        super().__init__(unk_token=unk_token , **kwargs )

        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            merges = merges_handle.read().split("\n" )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    @property
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        pairs = get_pairs(word )

        if not pairs:
            return token

        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = "@@ ".join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        split_tokens = []

        words = re.findall(r"\S+\n?" , text )

        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        out_string = " ".join(tokens ).replace("@@ " , "" ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )

        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )

        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
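
# `get_pairs` usage sketch: the adjacent symbol pairs one BPE merge step considers.
if __name__ == "__main__":
    print(get_pairs(("l", "o", "w", "er</w>")))  # {('l', 'o'), ('o', 'w'), ('w', 'er</w>')}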
| 276 | 0 |
'''simple docstring'''
def solution(n: int = 1_000 ):
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
    print(_A())
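# Hedged sanity check (arithmetic only, value chosen for illustration):
#   _A(3) = 2 * 3 * ((3 - 1) // 2) = 6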
| 454 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 454 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[Any] = "van"
def __init__( self ,snake_case__=224 ,snake_case__=3 ,snake_case__=[7, 3, 3, 3] ,snake_case__=[4, 2, 2, 2] ,snake_case__=[64, 128, 320, 512] ,snake_case__=[3, 3, 12, 3] ,snake_case__=[8, 8, 4, 4] ,snake_case__="gelu" ,snake_case__=0.02 ,snake_case__=1E-6 ,snake_case__=1E-2 ,snake_case__=0.0 ,snake_case__=0.0 ,**snake_case__ ,):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = image_size
SCREAMING_SNAKE_CASE_ : List[str] = num_channels
SCREAMING_SNAKE_CASE_ : Dict = patch_sizes
SCREAMING_SNAKE_CASE_ : str = strides
SCREAMING_SNAKE_CASE_ : str = hidden_sizes
SCREAMING_SNAKE_CASE_ : str = depths
SCREAMING_SNAKE_CASE_ : Optional[Any] = mlp_ratios
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = layer_scale_init_value
SCREAMING_SNAKE_CASE_ : Union[str, Any] = drop_path_rate
SCREAMING_SNAKE_CASE_ : List[str] = dropout_rate
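# --- Hedged usage sketch. The class above mirrors the upstream `VanConfig`; ---
# --- the import path and the numbers below are assumptions for illustration ---
# --- only, not taken from this file:                                        ---
#   from transformers import VanConfig
#   cfg = VanConfig(depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4])
#   print(cfg.model_type)  # "van"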
| 105 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = jnp.ones((batch_size, length) ) / length
return scores
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : int = 20
SCREAMING_SNAKE_CASE_ : int = self._get_uniform_logits(batch_size=2 ,length=snake_case__ )
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE_ : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
SCREAMING_SNAKE_CASE_ : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE_ : int = jax.nn.softmax(snake_case__ ,axis=-1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ : Dict = FlaxTemperatureLogitsWarper(temperature=1.3 )
SCREAMING_SNAKE_CASE_ : Any = jax.nn.softmax(temp_dist_warper_sharper(snake_case__ ,scores.copy() ,cur_len=snake_case__ ) ,axis=-1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(snake_case__ ,scores.copy() ,cur_len=snake_case__ ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10
SCREAMING_SNAKE_CASE_ : int = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.broadcast_to(np.arange(snake_case__ )[None, :] ,(batch_size, vocab_size) ).copy()
SCREAMING_SNAKE_CASE_ : Optional[int] = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_k_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
SCREAMING_SNAKE_CASE_ : int = 5
SCREAMING_SNAKE_CASE_ : Any = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.broadcast_to(np.arange(snake_case__ )[None, :] ,(batch_size, length) ).copy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_k_warp_safety_check(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : str = 10
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_ : Any = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
SCREAMING_SNAKE_CASE_ : Dict = FlaxTopPLogitsWarper(0.8 )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.exp(top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_ : str = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_ : List[Any] = np.broadcast_to(np.arange(snake_case__ )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_ : Any = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = 20
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : str = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case__ )
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor((batch_size, 20) ,vocab_size=20 )
SCREAMING_SNAKE_CASE_ : Optional[int] = 5
SCREAMING_SNAKE_CASE_ : List[str] = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = min_dist_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_ : Optional[int] = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = 15
SCREAMING_SNAKE_CASE_ : Any = min_dist_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = 20
SCREAMING_SNAKE_CASE_ : List[Any] = 4
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor((batch_size, 1) ,vocab_size=20 )
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : str = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Dict = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 20
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : Tuple = 5
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ ,eos_token_id=snake_case__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((batch_size, 4) ,vocab_size=20 )
SCREAMING_SNAKE_CASE_ : List[Any] = 4
SCREAMING_SNAKE_CASE_ : Dict = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_ : List[Any] = 3
SCREAMING_SNAKE_CASE_ : int = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 4
SCREAMING_SNAKE_CASE_ : Dict = 10
SCREAMING_SNAKE_CASE_ : int = 15
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((batch_size, sequence_length) ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = input_ids.copy()
SCREAMING_SNAKE_CASE_ : str = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ : int = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ : Any = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = 10
# no processor list
SCREAMING_SNAKE_CASE_ : Optional[int] = temp_dist_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = top_k_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = min_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = bos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = eos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# with processor list
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE_ : Any = processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10
SCREAMING_SNAKE_CASE_ : Dict = 15
SCREAMING_SNAKE_CASE_ : Dict = 2
SCREAMING_SNAKE_CASE_ : Tuple = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor((batch_size, sequence_length) ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = input_ids.copy()
SCREAMING_SNAKE_CASE_ : List[Any] = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ : List[str] = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = 10
# no processor list
def run_no_processor_list(snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = temp_dist_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = top_k_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = min_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = bos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = eos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
return scores
# with processor list
def run_processor_list(snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE_ : List[str] = processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
return scores
SCREAMING_SNAKE_CASE_ : Tuple = jax.jit(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = jax.jit(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = jitted_run_no_processor_list(snake_case__ ,snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = jitted_run_processor_list(snake_case__ ,snake_case__ ,snake_case__ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
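# --- Hedged numeric aside (runs only when flax is installed, matching the ---
# --- guarded imports above): dividing logits by a temperature below 1     ---
# --- sharpens the softmax, which is what the first warper test asserts.   ---
if is_flax_available():
    _probs_plain = jax.nn.softmax(jnp.array([0.0, 1.0, 2.0]))
    _probs_sharp = jax.nn.softmax(jnp.array([0.0, 1.0, 2.0]) / 0.5)
    assert float(_probs_sharp.max()) > float(_probs_plain.max())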
| 105 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = LEDConfig
UpperCamelCase_ : int = {}
UpperCamelCase_ : Union[str, Any] = '''gelu'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : List[str]=37 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Union[str, Any]=20 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Tuple=4 , ):
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : str = eos_token_id
SCREAMING_SNAKE_CASE : List[Any] = pad_token_id
SCREAMING_SNAKE_CASE : List[str] = bos_token_id
SCREAMING_SNAKE_CASE : Dict = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
SCREAMING_SNAKE_CASE : Any = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
SCREAMING_SNAKE_CASE : str = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
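        # Worked example (illustrative numbers): with seq_length=7 and
        # attention_window=4, this gives 7 + (4 - 7 % 4) % 4 = 7 + 1 = 8,
        # i.e. the sequence is padded up to the next multiple of the window.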
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE : int = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
SCREAMING_SNAKE_CASE : List[str] = prepare_led_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tf.concat(
[tf.zeros_like(UpperCAmelCase_ )[:, :-1], tf.ones_like(UpperCAmelCase_ )[:, -1:]] , axis=-1 , )
SCREAMING_SNAKE_CASE : Optional[int] = global_attention_mask
return config, inputs_dict
def _A ( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE : Dict = TFLEDModel(config=UpperCAmelCase_ ).get_decoder()
SCREAMING_SNAKE_CASE : Dict = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE : Tuple = input_ids[:1, :]
SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE : Optional[int] = 1
# first forward pass
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-3 )
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ):
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE : Dict = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCamelCase_ : Optional[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase_ : str = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase_ : int = True
UpperCamelCase_ : str = False
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : str = False
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : List[str] = TFLEDModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=UpperCAmelCase_ )
def _A ( self : List[Any] ):
self.config_tester.run_common_tests()
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.zeros_like(inputs_dict["attention_mask"] )
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : Optional[int] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Dict = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = [t.numpy() for t in outputs.encoder_attentions]
SCREAMING_SNAKE_CASE : Optional[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Any = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : List[str] = len(UpperCAmelCase_ )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Optional[int] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_decoder_attentions_output(UpperCAmelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase_ ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _A ( self : int ):
pass
def _A ( self : str ):
        # TODO: Head-masking not yet implemented
pass
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return tf.constant(lowercase , dtype=tf.intaa )
snake_case = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
SCREAMING_SNAKE_CASE : Optional[Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : int = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : List[Any] = prepare_led_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : int = (1, 1024, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
SCREAMING_SNAKE_CASE : List[str] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-3 )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : str = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
SCREAMING_SNAKE_CASE : Optional[Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : str = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : Tuple = prepare_led_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : Dict = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-3 , rtol=1E-3 )
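# --- Hedged aside (runs only when TF is installed, per the guard above):  ---
# --- the padding-mask recipe used by `prepare_led_inputs_dict`, shown on  ---
# --- a tiny made-up batch with an assumed pad id of 1.                    ---
if is_tf_available():
    _demo_ids = tf.constant([[5, 7, 1, 1]])
    _demo_mask = tf.cast(tf.math.not_equal(_demo_ids, 1), tf.int32)
    assert _demo_mask.numpy().tolist() == [[1, 1, 0, 0]]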
| 488 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
snake_case = """Create a default config file for Accelerate with only a few flags set."""
def lowerCamelCase__ ( lowercase="no" , lowercase = default_json_config_file , lowercase = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = Path(lowercase )
path.parent.mkdir(parents=lowercase , exist_ok=lowercase )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
SCREAMING_SNAKE_CASE : int = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.device_count()
SCREAMING_SNAKE_CASE : int = num_gpus
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if num_gpus > 1:
SCREAMING_SNAKE_CASE : Tuple = "MULTI_GPU"
else:
SCREAMING_SNAKE_CASE : Optional[Any] = "NO"
elif is_xpu_available() and use_xpu:
SCREAMING_SNAKE_CASE : List[str] = torch.xpu.device_count()
SCREAMING_SNAKE_CASE : str = num_xpus
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if num_xpus > 1:
SCREAMING_SNAKE_CASE : Any = "MULTI_XPU"
else:
SCREAMING_SNAKE_CASE : str = "NO"
elif is_npu_available():
SCREAMING_SNAKE_CASE : List[Any] = torch.npu.device_count()
SCREAMING_SNAKE_CASE : Optional[Any] = num_npus
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if num_npus > 1:
SCREAMING_SNAKE_CASE : str = "MULTI_NPU"
else:
SCREAMING_SNAKE_CASE : int = "NO"
else:
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : int = "NO"
SCREAMING_SNAKE_CASE : Dict = ClusterConfig(**lowercase )
config.to_json_file(lowercase )
return path
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parser.add_parser("default" , parents=lowercase , help=lowercase , formatter_class=lowercase )
parser.add_argument(
"--config_file" , default=lowercase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=lowercase , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
parser.set_defaults(func=lowercase )
return parser
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
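# Hedged CLI sketch of the subcommand wired above (flag names are taken from
# the parser definitions in this file; the value is illustrative):
#   accelerate config default --mixed_precision fp16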
| 488 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int | float | str ) -> tuple[int, int]:
try:
_lowerCAmelCase : List[str] = float(_lowerCamelCase )
except ValueError:
raise ValueError("""Please enter a valid number""" )
_lowerCAmelCase : int = decimal - int(_lowerCamelCase )
if fractional_part == 0:
return int(_lowerCamelCase ), 1
else:
_lowerCAmelCase : str = len(str(_lowerCamelCase ).split(""".""" )[1] )
_lowerCAmelCase : Any = int(decimal * (10**number_of_frac_digits) )
_lowerCAmelCase : List[Any] = 10**number_of_frac_digits
_lowerCAmelCase , _lowerCAmelCase : str = denominator, numerator
while True:
_lowerCAmelCase : int = dividend % divisor
if remainder == 0:
break
_lowerCAmelCase , _lowerCAmelCase : List[str] = divisor, remainder
_lowerCAmelCase , _lowerCAmelCase : int = numerator / divisor, denominator / divisor
return int(_lowerCamelCase ), int(_lowerCamelCase )
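# Worked example (illustrative): "6.25" -> 625/100; the Euclidean loop above
# finds gcd 25, so the function returns (25, 4).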
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction("67") = }""")
print(F"""{decimal_to_fraction("45.0") = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction("6.25") = }""")
print(F"""{decimal_to_fraction("78td") = }""")
| 213 | 
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : int=0 ) -> List[str]:
# Format the message.
if name is None:
_lowerCAmelCase : Optional[Any] = None
else:
_lowerCAmelCase : int = """.""" * max(0 ,spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
_lowerCAmelCase : int = fmt.format(_lowerCamelCase )
# Print and recurse (if needed).
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
if msg is not None:
print(_lowerCamelCase )
for k in val.keys():
recursive_print(_lowerCamelCase ,val[k] ,spaces + 2 )
elif isinstance(_lowerCamelCase ,torch.Tensor ):
print(_lowerCamelCase ,""":""" ,val.size() )
else:
print(_lowerCamelCase ,""":""" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ,_lowerCamelCase : Tuple ,_lowerCamelCase : Optional[Any] ) -> int:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
_lowerCAmelCase : str = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_lowerCAmelCase : int = (num_heads, hidden_size, num_splits) + input_shape[1:]
_lowerCAmelCase : Tuple = param.view(*_lowerCamelCase )
_lowerCAmelCase : str = param.transpose(0 ,2 )
_lowerCAmelCase : str = param.transpose(1 ,2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_lowerCAmelCase : List[str] = (num_heads, num_splits, hidden_size) + input_shape[1:]
_lowerCAmelCase : str = param.view(*_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = param.transpose(0 ,1 ).contiguous()
_lowerCAmelCase : Optional[Any] = param.view(*_lowerCamelCase )
return param
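# --- Hedged shape-only illustration of the checkpoint_version >= 2.0 branch ---
# --- above; heads=2, num_splits=3, head_dim=4 and the 8 columns are made-up ---
# --- numbers, not values from any real checkpoint.                          ---
_demo = torch.arange(2 * 3 * 4 * 8).view(2 * 3 * 4, 8)  # [heads*splits*dim, cols]
_demo = _demo.view(2, 3, 4, 8).transpose(0, 1).contiguous().view(-1, 8)
assert _demo.shape == (2 * 3 * 4, 8)  # now laid out as [splits*heads*dim, cols]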
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ) -> Any:
# The converted output model.
_lowerCAmelCase : Optional[int] = {}
# old versions did not store training args
_lowerCAmelCase : Dict = input_state_dict.get("""args""" ,_lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_lowerCAmelCase : Optional[Any] = ds_args.padded_vocab_size
_lowerCAmelCase : Tuple = ds_args.max_position_embeddings
_lowerCAmelCase : Optional[Any] = ds_args.hidden_size
_lowerCAmelCase : Union[str, Any] = ds_args.num_layers
_lowerCAmelCase : Dict = ds_args.num_attention_heads
_lowerCAmelCase : Optional[Any] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_lowerCAmelCase : List[str] = config.n_head
# The hidden_size per head.
_lowerCAmelCase : Any = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_lowerCAmelCase : Tuple = input_state_dict["""checkpoint_version"""]
else:
_lowerCAmelCase : Union[str, Any] = 0.0
# The model.
_lowerCAmelCase : Any = input_state_dict["""model"""]
# The language model.
_lowerCAmelCase : Any = model["""language_model"""]
# The embeddings.
_lowerCAmelCase : Union[str, Any] = lm["""embedding"""]
# The word embeddings.
_lowerCAmelCase : int = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_lowerCAmelCase : Dict = word_embeddings[: config.vocab_size, :]
_lowerCAmelCase : Optional[int] = word_embeddings
# The position embeddings.
_lowerCAmelCase : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_lowerCAmelCase : Union[str, Any] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
_lowerCAmelCase : Optional[Any] = pos_embeddings
# The transformer.
_lowerCAmelCase : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_lowerCAmelCase : Any = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_lowerCAmelCase : Optional[Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
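    # Worked example of the renaming below (derived from the regex and the map
    # above): "layers.3.mlp.dense_h_to_4h.weight" -> "transformer.h.3.mlp.c_fc.weight"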
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_lowerCAmelCase : Tuple = layer_re.match(_lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_lowerCAmelCase : Optional[int] = int(m.group(1 ) )
# The name of the operation.
_lowerCAmelCase : Tuple = m.group(2 )
# Is it a weight or a bias?
_lowerCAmelCase : List[Any] = m.group(3 )
# The name of the layer.
_lowerCAmelCase : str = f"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_lowerCAmelCase : Optional[Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_lowerCAmelCase : List[Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_lowerCAmelCase : Optional[int] = torch.tril(torch.ones((n_positions, n_positions) ,dtype=torch.floataa ) ).view(
1 ,1 ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
_lowerCAmelCase : Dict = torch.tensor(-1e4 ,dtype=torch.floataa )
_lowerCAmelCase : Dict = masked_bias
_lowerCAmelCase : List[Any] = fix_query_key_value_ordering(_lowerCamelCase ,_lowerCamelCase ,3 ,_lowerCamelCase ,_lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_lowerCAmelCase : int = out_val.transpose(0 ,1 ).contiguous()
# Store.
_lowerCAmelCase : List[str] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_lowerCAmelCase : Union[str, Any] = fix_query_key_value_ordering(_lowerCamelCase ,_lowerCamelCase ,3 ,_lowerCamelCase ,_lowerCamelCase )
# Store. No change of shape.
_lowerCAmelCase : str = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_lowerCAmelCase : Any = megatron_to_transformers[op_name]
_lowerCAmelCase : Optional[Any] = val.transpose(0 ,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_lowerCAmelCase : str = megatron_to_transformers[op_name]
_lowerCAmelCase : Union[str, Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_lowerCAmelCase : int = transformer["""final_layernorm.weight"""]
_lowerCAmelCase : Union[str, Any] = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
_lowerCAmelCase : int = word_embeddings
# It should be done!
return output_state_dict
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
# Create the argument parser.
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" ,action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" ,type=_lowerCamelCase ,help="""Path to the checkpoint file (.zip archive or direct .pt file)""" ,)
parser.add_argument(
"""--config_file""" ,default="""""" ,type=_lowerCamelCase ,help="""An optional config json file describing the pre-trained model.""" ,)
_lowerCAmelCase : List[Any] = parser.parse_args()
# Extract the basename.
_lowerCAmelCase : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint ,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_lowerCAmelCase : Any = torch.load(_lowerCamelCase ,map_location="""cpu""" )
else:
_lowerCAmelCase : Optional[int] = torch.load(args.path_to_checkpoint ,map_location="""cpu""" )
_lowerCAmelCase : Optional[int] = input_state_dict.get("""args""" ,_lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_lowerCAmelCase : Optional[Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_lowerCAmelCase : Any = """gelu_new"""
else:
_lowerCAmelCase : str = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_lowerCAmelCase : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_lowerCAmelCase : Tuple = GPTaConfig(
vocab_size=50257 ,n_positions=1024 ,n_embd=1024 ,n_layer=24 ,n_head=16 ,n_inner=4096 ,activation_function=_lowerCamelCase ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1e-5 ,initializer_range=0.02 ,summary_type="""cls_index""" ,summary_use_proj=_lowerCamelCase ,summary_activation=_lowerCamelCase ,summary_proj_to_labels=_lowerCamelCase ,summary_first_dropout=0.1 ,scale_attn_weights=_lowerCamelCase ,use_cache=_lowerCamelCase ,bos_token_id=50256 ,eos_token_id=50256 ,)
else:
_lowerCAmelCase : Optional[Any] = GPTaConfig.from_json_file(args.config_file )
_lowerCAmelCase : Tuple = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_lowerCAmelCase : Tuple = convert_megatron_checkpoint(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_lowerCamelCase ,_lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_lowerCAmelCase : Optional[Any] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_lowerCAmelCase : Dict = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_lowerCAmelCase : List[str] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}" )
else:
_lowerCAmelCase : Optional[Any] = """gpt2"""
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = type(_lowerCamelCase ).__name__
_lowerCAmelCase : Dict = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(_lowerCamelCase )
# Save tokenizer based on args
print(f"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(_lowerCamelCase )
# Store the state_dict to file.
_lowerCAmelCase : List[str] = os.path.join(_lowerCamelCase ,"""pytorch_model.bin""" )
print(f"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(_lowerCamelCase ,_lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 213 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowerCamelCase_ ( lowercase ):
__lowercase : Any = "decision_transformer"
__lowercase : int = ["past_key_values"]
__lowercase : Tuple = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowerCamelCase_=17 , lowerCamelCase_=4 , lowerCamelCase_=1_28 , lowerCamelCase_=40_96 , lowerCamelCase_=True , lowerCamelCase_=1 , lowerCamelCase_=10_24 , lowerCamelCase_=3 , lowerCamelCase_=1 , lowerCamelCase_=None , lowerCamelCase_="relu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1E-5 , lowerCamelCase_=0.02 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=5_02_56 , lowerCamelCase_=5_02_56 , lowerCamelCase_=False , lowerCamelCase_=False , **lowerCamelCase_ , ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = state_dim
_UpperCamelCase = act_dim
_UpperCamelCase = hidden_size
_UpperCamelCase = max_ep_len
_UpperCamelCase = action_tanh
_UpperCamelCase = vocab_size
_UpperCamelCase = n_positions
_UpperCamelCase = n_layer
_UpperCamelCase = n_head
_UpperCamelCase = n_inner
_UpperCamelCase = activation_function
_UpperCamelCase = resid_pdrop
_UpperCamelCase = embd_pdrop
_UpperCamelCase = attn_pdrop
_UpperCamelCase = layer_norm_epsilon
_UpperCamelCase = initializer_range
_UpperCamelCase = scale_attn_weights
_UpperCamelCase = use_cache
_UpperCamelCase = scale_attn_by_inverse_layer_idx
_UpperCamelCase = reorder_and_upcast_attn
_UpperCamelCase = bos_token_id
_UpperCamelCase = eos_token_id
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
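# Hedged usage sketch (the class above mirrors the upstream
# DecisionTransformerConfig; the import path and values below are assumptions
# for illustration only):
#   from transformers import DecisionTransformerConfig
#   cfg = DecisionTransformerConfig(state_dim=17, act_dim=6, hidden_size=128)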
| 589 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( lowercase , unittest.TestCase ):
__lowercase : Dict = XLMRobertaTokenizer
__lowercase : List[Any] = XLMRobertaTokenizerFast
__lowercase : Dict = True
__lowercase : Union[str, Any] = True
def lowercase ( self ) -> List[str]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = "<pad>"
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowerCamelCase_ ) , 10_02 )
def lowercase ( self ) -> int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
_UpperCamelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_UpperCamelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def lowercase ( self ) -> str:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCamelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
_UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
_UpperCamelCase = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@cached_property
def lowercase ( self ) -> str:
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def lowercase ( self ) -> str:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase_ , f.name )
_UpperCamelCase = XLMRobertaTokenizer(f.name , keep_accents=lowerCamelCase_ )
_UpperCamelCase = pickle.dumps(lowerCamelCase_ )
pickle.loads(lowerCamelCase_ )
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = "I was born in 92000, and this is falsé."
_UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ )
_UpperCamelCase = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
_UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(lowerCamelCase_ )
_UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
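
        # A round-trip check one could add here (an assumption, not part of the original test):
        #     decoded = self.big_tokenizer.decode(original_tokenizer_encodings, skip_special_tokens=True)
        #     self.assertEqual(decoded, "Hello World!")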
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCamelCase = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase,  # the expected-encoding dict defined above
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
| 589 | 1 |
"""Fractional (greedy) knapsack: maximise profit when items may be taken partially."""


def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Return the maximum profit attainable with a knapsack capacity of max_weight."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) or we run out of items
    while limit <= max_weight and i < length:
        # take the item with the greatest remaining profit/weight ratio
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so duplicate ratios resolve correctly

        # check if the whole item fits into the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Take the full item: weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # Only part of the item fits, so take the remaining kgs proportionally:
            # (remaining weight / weight[index]) * profit[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
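

# A quick worked example (illustrative, not part of the original module): with
# profit = [10, 20, 30], weight = [3, 4, 5] and max_weight = 6, the greedy pass takes
# all of the third item (ratio 6.0, gain 30) plus 1 kg of the second (ratio 5.0,
# gain 5), so calc_profit([10, 20, 30], [3, 4, 5], 6) == 35.0.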
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
# Function Call
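    # Note: calc_profit's return value is discarded here; in practice one would
    # usually print it, e.g. print(calc_profit(profit, weight, max_weight)).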
calc_profit(profit, weight, max_weight) | 296 |
"""Tests for the LayoutLMva image processor."""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
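
    # The numpy and torch variants below exercise exactly the same resize/normalize
    # path; only the input container type (np.ndarray vs. torch.Tensor) changes.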
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmva_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__ : Any =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE__ : Optional[Any] =[[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 
6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224)) | 296 | 1 |
"""Integration test for the TF XLM-RoBERTa model."""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
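
        # For reference, the hard-coded input_ids above should match what the tokenizer
        # produces for "My dog is cute" (an assumption based on the standard checkpoint):
        #     from transformers import XLMRobertaTokenizer
        #     tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
        #     tok("My dog is cute", return_tensors="tf")["input_ids"]  # [[0, 2646, 10269, 83, 99942, 2]]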
| 672 |
"""NLI-based zero-shot classification pipeline."""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label
    into an NLI premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
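
        # For example (hypothetical values):
        #     handler("Who won the game?", ["sports", "politics"], "This example is {}.")
        # returns
        #     ([["Who won the game?", "This example is sports."],
        #       ["Who won the game?", "This example is politics."]],
        #      ["Who won the game?"])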
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline: each candidate label is scored by how
    strongly the model predicts "entailment" for the (sequence, hypothesis) pair.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """
        Parse arguments and tokenize only_first so that the hypothesis (label) is not truncated
        """
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                "`pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
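
    # Truncation defaults to ONLY_FIRST so that an over-long premise gets truncated
    # while the short hypothesis ("This example is {label}.") stays intact.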
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
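
        # Each yielded dict is one (premise, hypothesis) pair; ChunkPipeline runs
        # _forward once per pair, and postprocess() regroups the per-label logits.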
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
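
# Minimal usage sketch (assuming a standard NLI checkpoint such as
# "facebook/bart-large-mnli"; not part of this module):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier(
#         "one day I will see the world",
#         candidate_labels=["travel", "cooking", "dancing"],
#     )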
| 672 | 1 |